# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import unicode_literals, division, print_function
"""
Helper methods for generating GW inputs and workflows.
"""
__author__ = "Michiel van Setten"
__copyright__ = " "
__version__ = "0.9"
__maintainer__ = "Michiel van Setten"
__email__ = "[email protected]"
__date__ = "May 2014"
import time
import os
import ast
import copy
import math
import shutil
import numpy as np
from pymatgen.core.units import eV_to_Ha
from pymatgen.symmetry.analyzer import SpacegroupAnalyzer
from pymatgen.transformations.standard_transformations import OxidationStateRemovalTransformation, \
PrimitiveCellTransformation, SupercellTransformation
def now():
"""
helper to return a time string
"""
return time.strftime("%H:%M:%S %d/%m/%Y")
def read_extra_abivars():
    """
    Helper to read extra ABINIT variables from an 'extra_abivars' file,
    which should contain a Python dict literal.
    """
    ea = {}
    if os.path.isfile('extra_abivars'):
        with open('extra_abivars') as f:
            ea = ast.literal_eval(f.read())
        if not isinstance(ea, dict):
            raise RuntimeError('extra_abivars should evaluate to a dict')
    return ea
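# The 'extra_abivars' file, when present, is expected to hold a Python dict
# literal parseable by ast.literal_eval, for example (illustrative ABINIT
# variables, not prescribed by this module):
#   {'ecut': 40, 'nstep': 100}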
def refine_structure(structure, symprec=1e-3):
    """
    Remove oxidation states, symmetry-refine the structure, reduce it to its
    primitive cell and make sure the resulting lattice is right-handed.
    """
remove_ox = OxidationStateRemovalTransformation()
structure = remove_ox.apply_transformation(structure)
sym_finder = SpacegroupAnalyzer(structure=structure, symprec=symprec)
structure = sym_finder.get_refined_structure()
get_prim = PrimitiveCellTransformation()
structure = get_prim.apply_transformation(structure)
m = structure.lattice.matrix
x_prod = np.dot(np.cross(m[0], m[1]), m[2])
if x_prod < 0:
print(x_prod)
trans = SupercellTransformation(((1, 0, 0), (0, 0, 1), (0, 1, 0)))
structure = trans.apply_transformation(structure)
m = structure.lattice.matrix
x_prod = np.dot(np.cross(m[0], m[1]), m[2])
print(x_prod)
if x_prod < 0:
            raise RuntimeError('failed to obtain a right-handed lattice')
return structure
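# Note on the handedness check above: the scalar triple product
# dot(cross(m[0], m[1]), m[2]) equals the determinant of the lattice matrix,
# so a negative value means a left-handed set of lattice vectors. Swapping two
# lattice vectors (here the second and third, via the permutation supercell
# matrix) flips the sign of the determinant, which is why the transformation
# is applied once and an error is raised only if the cell is still
# left-handed afterwards.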
def s_name(structure):
if os.path.isfile('old'):
name_ = str(structure.composition.reduced_formula)
else:
name_ = str(structure.composition.reduced_formula) + '_' + str(structure.item)
return name_
def clean(some_string, uppercase=False):
"""
helper to clean up an input string
"""
if uppercase:
return some_string.strip().upper()
else:
return some_string.strip().lower()
def expand(tests, level):
from gw.codeinterfaces import get_all_ecuteps, get_all_nbands
new_tests = copy.deepcopy(tests)
for test in tests.keys():
if test in get_all_ecuteps():
ec = str(test)
ec_range = tests[ec]['test_range']
ec_step = ec_range[-1] - ec_range[-2]
if int(level / 2) == level / 2:
print('new ec wedge')
# even level of grid extension > new ec wedge
new_ec_range = (ec_range[-1] + int(level / 2 * ec_step),)
else:
print('new nb wedge')
# odd level of grid extension > new nb wedge
extension = tuple(range(ec_range[-1] + ec_step, ec_range[-1] + (1 + int((level - 1) / 2)) * ec_step, ec_step))
new_ec_range = ec_range + extension
new_tests[ec].update({'test_range': new_ec_range})
if test in get_all_nbands():
nb = str(test)
nb_range = tests[nb]['test_range']
nb_step = nb_range[-1] - nb_range[-2]
print(nb_step)
if int(level / 2) == level / 2:
# even level of grid extension > new ec wedge
extension = tuple(range(nb_range[-1] + nb_step, nb_range[-1] + (1 + int(level / 2)) * nb_step, nb_step))
new_nb_range = nb_range + extension
else:
# odd level of grid extension > new nb wedge
new_nb_range = (nb_range[-1] + int((level + 1) / 2 * nb_step),)
new_tests[nb].update({'test_range': new_nb_range})
print(new_tests)
return new_tests
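# Sketch of how expand() grows a convergence grid, based on the logic above:
# at each extension 'level' one parameter receives a single new trial value
# (a "wedge") while the other parameter's test_range is extended by extra
# steps. Even levels add an ecuteps wedge and extend the nbands range; odd
# levels add an nbands wedge and extend the ecuteps range. With illustrative
# key names and numbers, tests = {'ecuteps': {'test_range': (4, 8, 12)},
# 'nscf_nbands': {'test_range': (10, 20, 30)}} and level=2 would give
# 'ecuteps' a new single-point range (16,) and extend 'nscf_nbands' to
# (10, 20, 30, 40).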
def print_gnuplot_header(filename, title='', mode='convplot', filetype='jpeg'):
xl = 'set xlabel "nbands"\n'
yl = 'set ylabel "ecuteps (eV)"\n'
zl = 'set zlabel "gap (eV)"\n'
if mode == 'convplot':
f = open(filename, mode='a')
if filetype is not None:
f.write('set terminal '+filetype+'\n')
f.write('set title "'+title+'"\n')
f.write(xl)
f.write(yl)
f.write(zl)
f.close()
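# For a call like print_gnuplot_header('plot.plt', title='Si', mode='convplot',
# filetype='jpeg'), the header appended to the file looks like ('Si' is an
# illustrative title):
#   set terminal jpeg
#   set title "Si"
#   set xlabel "nbands"
#   set ylabel "ecuteps (eV)"
#   set zlabel "gap (eV)"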
def read_grid_from_file(filename):
"""
Read the results of a full set of calculations from file
"""
try:
f = open(filename, mode='r')
full_res = ast.literal_eval(f.read())
f.close()
except SyntaxError:
print('Problems reading ', filename)
full_res = {'grid': 0, 'all_done': False}
except (OSError, IOError):
full_res = {'grid': 0, 'all_done': False}
return full_res
def is_converged(hartree_parameters, structure, return_values=False):
filename = s_name(structure) + ".conv_res"
to_return = {}
try:
f = open(filename, mode='r')
conv_res = ast.literal_eval(f.read())
f.close()
converged = True if True in conv_res['control'].values() else False
except (IOError, OSError, ValueError):
if return_values:
            print('Input file ', filename, ' not found; the convergence calculation did not finish properly'
                  ' or was not parsed ...')
converged = False
return converged
if return_values and converged:
if hartree_parameters:
try:
conv_res['values']['ecut'] = 4 * math.ceil(conv_res['values']['ecut'] * eV_to_Ha / 4)
except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
pass
try:
conv_res['values']['ecuteps'] = 4 * math.ceil(conv_res['values']['ecuteps'] * eV_to_Ha / 4)
except (KeyError, ArithmeticError, FloatingPointError, SyntaxError):
pass
for k in conv_res['values'].keys():
if conv_res['values'][k] != 0 and conv_res['values'][k] != np.inf:
to_return.update({k: conv_res['values'][k]})
return to_return
else:
return converged
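# Illustrative layout of a '<name>.conv_res' file parsed by is_converged (the
# exact keys under 'values' depend on the convergence workflow that wrote it):
#   {'control': {'ecuteps': True, 'nbands': True},
#    'values': {'ecut': 44.0, 'ecuteps': 8.0, 'nbands': 120}}
# 'control' flags which parameters converged; 'values' holds the converged
# values, which is_converged converts from eV to Hartree-based inputs rounded
# up to a multiple of 4 when hartree_parameters is True.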
def store_conv_results(name, folder):
print("| Storing results for %s" % name)
if not os.path.isdir(folder):
os.mkdir(folder)
shutil.copy(name+'.full_res', os.path.join(folder, name+'.full_res'))
for data_file in ['conv_res', 'log', 'conv.log', 'str', 'fitdat', 'convdat', 'data']:
try:
os.rename(name+'.'+data_file, os.path.join(folder, name+'.'+data_file))
except OSError:
pass
def add_gg_gap(structure):
structure.vbm_l = "G"
structure.cbm_l = "G"
structure.cbm = (0.0, 0.0, 0.0)
structure.vbm = (0.0, 0.0, 0.0)
return structure
#
# Copyright (C) 2000-2008 greg Landrum
#
""" Training algorithms for feed-forward neural nets
Unless noted otherwise, algorithms and notation are taken from:
"Artificial Neural Networks: Theory and Applications",
Dan W. Patterson, Prentice Hall, 1996
"""
import numpy
class Trainer(object):
""" "virtual base class" for network trainers
"""
pass
class BackProp(Trainer):
"""implement back propagation (algorithm on pp 153-154 of Patterson)
I don't *think* that I've made any assumptions about the connectivity of
the net (i.e. full connectivity between layers is not required).
**NOTE:** this code is currently making the assumption that the activation
functions on the nodes in the network are capable of calculating their
derivatives using only their values (i.e. a DerivFromVal method should
exist). This shouldn't be too hard to change.
"""
def StepUpdate(self, example, net, resVect=None):
""" does a BackProp step based upon the example
**Arguments**
- example: a 2-tuple:
         1) a list of variable values
2) a list of result values (targets)
- net: a _Network_ (or something supporting the same API)
      - resVect: if this is provided (i.e. not None), the network is not required
        to classify the _example_; the given result vector is used instead
**Returns**
the backprop error from _network_ **before the update**
**Note**
In case it wasn't blindingly obvious, the weights in _network_ are modified
in the course of taking a backprop step.
"""
totNumNodes = net.GetNumNodes()
if self.oldDeltaW is None:
self.oldDeltaW = numpy.zeros(totNumNodes, numpy.float64)
outputNodeList = net.GetOutputNodeList()
nOutput = len(outputNodeList)
targetVect = numpy.array(example[-nOutput:], numpy.float64)
trainVect = example[:-nOutput]
if resVect is None:
# classify the example
net.ClassifyExample(trainVect)
resVect = net.GetLastOutputs()
outputs = numpy.take(resVect, outputNodeList)
errVect = targetVect - outputs
delta = numpy.zeros(totNumNodes, numpy.float64)
# start with the output layer
for i in range(len(outputNodeList)):
idx = outputNodeList[i]
node = net.GetNode(idx)
# the deltas here are easy
delta[idx] = errVect[i] * node.actFunc.DerivFromVal(resVect[idx])
# use these results to start working on the deltas of the preceding layer
inputs = node.GetInputs()
weights = delta[idx] * node.GetWeights()
for j in range(len(inputs)):
idx2 = inputs[j]
delta[idx2] = delta[idx2] + weights[j]
# now propagate the deltas backwards
for layer in range(net.GetNumHidden() - 1, -1, -1):
nodesInLayer = net.GetHiddenLayerNodeList(layer)
for idx in nodesInLayer:
node = net.GetNode(idx)
# start by finishing off the error term for this guy
delta[idx] = delta[idx] * node.actFunc.DerivFromVal(resVect[idx])
# and then propagate our errors to the preceding layer
if layer != 0:
inputs = node.GetInputs()
weights = delta[idx] * node.GetWeights()
for i in range(len(inputs)):
idx2 = inputs[i]
delta[idx2] = delta[idx2] + weights[i]
# okey dokey... we've now got the deltas for each node, use those
# to update the weights (whew!)
nHidden = net.GetNumHidden()
for layer in range(0, nHidden + 1):
if layer == nHidden:
idxList = net.GetOutputNodeList()
else:
idxList = net.GetHiddenLayerNodeList(layer)
for idx in idxList:
node = net.GetNode(idx)
dW = self.speed * delta[idx] * numpy.take(resVect, node.GetInputs())
newWeights = node.GetWeights() + dW
node.SetWeights(newWeights)
        # return the error computed from the OLD (pre-update) weights; as written
        # this is the absolute error of the first output node
return numpy.sqrt(errVect * errVect)[0]
def TrainOnLine(self, examples, net, maxIts=5000, errTol=0.1, useAvgErr=1, silent=0):
""" carries out online training of a neural net
The definition of online training is that the network is updated after
each example is presented.
**Arguments**
      - examples: a list of 2-tuples:
         1) a list of variable values
2) a list of result values (targets)
- net: a _Network_ (or something supporting the same API)
- maxIts: the maximum number of *training epochs* (see below for definition) to be
run
- errTol: the tolerance for convergence
      - useAvgErr: if this toggle is 1, the error accumulated over an epoch is
        divided by the number of training examples for the purposes of checking
        convergence; otherwise the maximum single-example error is used.
- silent: controls the amount of visual noise produced as this runs.
**Note**
a *training epoch* is one complete pass through all the training examples
"""
nExamples = len(examples)
converged = 0
cycle = 0
while (not converged) and (cycle < maxIts):
maxErr = 0
newErr = 0
# print('bp: ',cycle)
for example in examples:
localErr = self.StepUpdate(example, net)
newErr += localErr
if localErr > maxErr:
maxErr = localErr
if useAvgErr == 1:
newErr = newErr / nExamples
else:
newErr = maxErr
# print('\t',newErr,errTol)
if newErr <= errTol:
converged = 1
# if cycle % 10 == 0 and not silent:
if not silent:
print('epoch %d, error: % 6.4f' % (cycle, newErr))
cycle = cycle + 1
if not silent:
if converged:
print('Converged after %d epochs.' % cycle)
else:
print('NOT Converged after %d epochs.' % cycle)
print('final error: % 6.4f' % newErr)
def __init__(self, speed=0.5, momentum=0.7):
""" Constructor
**Arguments**
- speed: the speed parameter for back prop training
- momentum: the momentum term for back prop training
*Not currently used*
"""
self.speed = speed
self.momentum = momentum
self.oldDeltaW = None
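# Note: BackProp stores momentum and allocates oldDeltaW, but the weight update
# in StepUpdate does not use either of them (the docstring marks momentum as
# not currently used). A conventional momentum term would look roughly like
# this sketch, which is not part of the algorithm as written:
#   dW = speed * delta[idx] * inputs + momentum * oldDeltaW[idx]
#   oldDeltaW[idx] = dW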
if __name__ == '__main__': # pragma: nocover
from rdkit.ML.Neural import Network
def testAnd():
examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.1]], [[1, 0, 1], [.1]], [[1, 1, 1], [.9]]]
net = Network.Network([3, 1])
t = BackProp()
t.TrainOnLine(examples, net)
return net
def testOr():
examples = [[[0, 0, 1], [0.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.9]]]
net = Network.Network([3, 1])
t = BackProp()
t.TrainOnLine(examples, net, maxIts=1000, useAvgErr=0)
print('classifications:')
for example in examples:
res = net.ClassifyExample(example[0])
print('%f -> %f' % (example[1][0], res))
return net
def testXor():
examples = [[[0, 0, 1], [.1]], [[0, 1, 1], [.9]], [[1, 0, 1], [.9]], [[1, 1, 1], [.1]]]
net = Network.Network([3, 3, 1])
t = BackProp(speed=.8)
t.TrainOnLine(examples, net, errTol=0.2)
return net
def testLinear():
examples = [
[.1, .1],
[.2, .2],
[.3, .3],
[.4, .4],
[.8, .8],
]
net = Network.Network([1, 2, 1])
t = BackProp(speed=.8)
t.TrainOnLine(examples, net, errTol=0.1, useAvgErr=0)
print('classifications:')
for example in examples:
res = net.ClassifyExample(example[:-1])
print('%f -> %f' % (example[-1], res))
return net
def runProfile(command):
import random
random.seed(23)
import profile
import pstats
datFile = '%s.prof.dat' % (command)
profile.run('%s()' % command, datFile)
stats = pstats.Stats(datFile)
stats.strip_dirs()
stats.sort_stats('time').print_stats()
if 0:
net = testXor()
print('Xor:', net)
import pickle
outF = open('xornet.pkl', 'wb+')
pickle.dump(net, outF)
outF.close()
else:
# runProfile('testLinear')
net = testLinear()
# net = testOr()
# -*- coding: utf-8 -*-
from operator import attrgetter
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType
from pyangbind.lib.yangtypes import RestrictedClassType
from pyangbind.lib.yangtypes import TypedListType
from pyangbind.lib.yangtypes import YANGBool
from pyangbind.lib.yangtypes import YANGListType
from pyangbind.lib.yangtypes import YANGDynClass
from pyangbind.lib.yangtypes import ReferenceType
from pyangbind.lib.base import PybindBase
from collections import OrderedDict
from decimal import Decimal
from bitarray import bitarray
import six
# PY3 support of some PY2 keywords (needs improvement)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
from . import config
from . import state
class ibgp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/ibgp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Multipath parameters for iBGP
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "ibgp"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"ibgp",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/config (container)
YANG Description: Configuration parameters relating to iBGP multipath
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to iBGP multipath
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/state (container)
YANG Description: State information relating to iBGP multipath
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to iBGP multipath
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
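# Illustrative use of the generated binding. Leaf names such as 'maximum_paths'
# are assumed from the OpenConfig model and live in the imported config/state
# classes, not in this file:
#   obj = ibgp()
#   obj.config.maximum_paths = 4   # hypothetical leaf under .../ibgp/config
#   print(obj._path())             # ['network-instances', ..., 'ibgp']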
from . import config
from . import state
class ibgp(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/bgp/peer-groups/peer-group/use-multiple-paths/ibgp. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: Multipath parameters for iBGP
"""
__slots__ = ("_path_helper", "_extmethods", "__config", "__state")
_yang_name = "ibgp"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"bgp",
"peer-groups",
"peer-group",
"use-multiple-paths",
"ibgp",
]
def _get_config(self):
"""
Getter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/config (container)
YANG Description: Configuration parameters relating to iBGP multipath
"""
return self.__config
def _set_config(self, v, load=False):
"""
Setter method for config, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/config (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_config is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_config() directly.
YANG Description: Configuration parameters relating to iBGP multipath
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """config must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=config.config, is_container='container', yang_name="config", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__config = t
if hasattr(self, "_set"):
self._set()
def _unset_config(self):
self.__config = YANGDynClass(
base=config.config,
is_container="container",
yang_name="config",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
def _get_state(self):
"""
Getter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/state (container)
YANG Description: State information relating to iBGP multipath
"""
return self.__state
def _set_state(self, v, load=False):
"""
Setter method for state, mapped from YANG variable /network_instances/network_instance/protocols/protocol/bgp/peer_groups/peer_group/use_multiple_paths/ibgp/state (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_state is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_state() directly.
YANG Description: State information relating to iBGP multipath
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """state must be of a type compatible with container""",
"defined-type": "container",
"generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='container', is_config=True)""",
}
)
self.__state = t
if hasattr(self, "_set"):
self._set()
def _unset_state(self):
self.__state = YANGDynClass(
base=state.state,
is_container="container",
yang_name="state",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="container",
is_config=True,
)
config = __builtin__.property(_get_config, _set_config)
state = __builtin__.property(_get_state, _set_state)
_pyangbind_elements = OrderedDict([("config", config), ("state", state)])
import csv
import datetime
import json
import os
import StringIO
from time import sleep
from django.conf import settings
from django.core.files.storage import get_storage_class
from django.core.urlresolvers import reverse
from django.utils.dateparse import parse_datetime
from xlrd import open_workbook
from onadata.apps.main.views import delete_data
from onadata.apps.main.tests.test_base import TestBase
from onadata.apps.viewer.tests.export_helpers import viewer_fixture_path
from onadata.apps.viewer.views import delete_export, export_list,\
create_export, export_progress, export_download
from onadata.apps.viewer.xls_writer import XlsWriter
from onadata.apps.viewer.models.export import Export
from onadata.apps.main.models.meta_data import MetaData
from onadata.apps.viewer.models.parsed_instance import ParsedInstance
from onadata.apps.logger.models import Instance
from onadata.apps.viewer.tasks import create_xls_export
from onadata.libs.utils.export_tools import generate_export,\
increment_index_in_filename, dict_to_joined_export
AMBULANCE_KEY = 'transport/available_transportation_types_to_referral_fac'\
'ility/ambulance'
AMBULANCE_KEY_DOTS = 'transport.available_transportation_types_to_referra'\
'l_facility.ambulance'
def _main_fixture_path(instance_name):
return os.path.join(settings.PROJECT_ROOT, 'apps', 'main', 'tests',
'fixtures', 'transportation', 'instances_w_uuid',
instance_name, instance_name + '.xml')
class TestExports(TestBase):
def setUp(self):
super(TestExports, self).setUp()
self._submission_time = parse_datetime('2013-02-18 15:54:01Z')
def test_unique_xls_sheet_name(self):
xls_writer = XlsWriter()
xls_writer.add_sheet('section9_pit_latrine_with_slab_group')
xls_writer.add_sheet('section9_pit_latrine_without_slab_group')
# create a set of sheet names keys
sheet_names_set = set(xls_writer._sheets.keys())
self.assertEqual(len(sheet_names_set), 2)
def test_csv_http_response(self):
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'),
forced_submission_time=self._submission_time)
response = self.client.get(reverse(
'csv_export',
kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
test_file_path = viewer_fixture_path('transportation.csv')
content = self._get_response_content(response)
with open(test_file_path, 'r') as test_file:
self.assertEqual(content, test_file.read())
def test_csv_without_na_values(self):
self._publish_transportation_form()
survey = self.surveys[0]
self._make_submission(
os.path.join(
self.this_directory, 'fixtures', 'transportation',
'instances', survey, survey + '.xml'),
forced_submission_time=self._submission_time)
na_rep_restore = settings.NA_REP
settings.NA_REP = u''
response = self.client.get(reverse(
'csv_export',
kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}))
self.assertEqual(response.status_code, 200)
test_file_path = viewer_fixture_path('transportation_without_na.csv')
content = self._get_response_content(response)
with open(test_file_path, 'r') as test_file:
self.assertEqual(content, test_file.read())
settings.NA_REP = na_rep_restore
def test_responses_for_empty_exports(self):
self._publish_transportation_form()
# test csv though xls uses the same view
url = reverse(
'csv_export',
kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
}
)
self.response = self.client.get(url)
self.assertEqual(self.response.status_code, 404)
self.assertIn('text/html', self.response['content-type'])
def test_create_export(self):
self._publish_transportation_form_and_submit_instance()
storage = get_storage_class()()
# test xls
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.xls')
# test csv
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.csv')
# test xls with existing export_id
existing_export = Export.objects.create(xform=self.xform,
export_type=Export.XLS_EXPORT)
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string, existing_export.id)
self.assertEqual(existing_export.id, export.id)
def test_delete_file_on_export_delete(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
storage = get_storage_class()()
self.assertTrue(storage.exists(export.filepath))
# delete export object
export.delete()
self.assertFalse(storage.exists(export.filepath))
def test_graceful_exit_on_export_delete_if_file_doesnt_exist(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
storage = get_storage_class()()
# delete file
storage.delete(export.filepath)
self.assertFalse(storage.exists(export.filepath))
# clear filename, like it would be in an incomplete export
export.filename = None
export.filedir = None
export.save()
# delete export record, which should try to delete file as well
delete_url = reverse(delete_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
post_data = {'export_id': export.id}
response = self.client.post(delete_url, post_data)
self.assertEqual(response.status_code, 302)
def test_delete_oldest_export_on_limit(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create first export
first_export = generate_export(
Export.XLS_EXPORT, 'xls', self.user.username, self.xform.id_string)
self.assertIsNotNone(first_export.pk)
# create exports that exceed set limit
for i in range(Export.MAX_EXPORTS):
generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
# first export should be deleted
exports = Export.objects.filter(id=first_export.id)
self.assertEqual(len(exports), 0)
def test_create_export_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
num_exports = Export.objects.count()
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
# anonymous user has to login first
response = self.anon.post(create_export_url)
self.assertEqual(response.status_code, 302)
self.assertIn("/accounts/login", response['location'])
response = self.client.post(create_export_url)
self.assertEqual(response.status_code, 302)
self.assertEqual(Export.objects.count(), num_exports + 1)
def test_delete_export_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
exports = Export.objects.filter(id=export.id)
self.assertEqual(len(exports), 1)
delete_url = reverse(delete_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
post_data = {'export_id': export.id}
# anonymous user has to login first
response = self.anon.post(delete_url, post_data)
self.assertEqual(response.status_code, 302)
self.assertIn("/accounts/login", response['location'])
response = self.client.post(delete_url, post_data)
self.assertEqual(response.status_code, 302)
exports = Export.objects.filter(id=export.id)
self.assertEqual(len(exports), 0)
def test_export_progress_output(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create exports
for i in range(2):
generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
self.assertEqual(Export.objects.count(), 2)
# progress for multiple exports
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
get_data = {'export_ids': [e.id for e in Export.objects.all()]}
response = self.client.get(progress_url, get_data)
content = json.loads(response.content)
self.assertEqual(len(content), 2)
self.assertEqual(sorted(['url', 'export_id', 'complete', 'filename']),
sorted(content[0].keys()))
def test_auto_export_if_none_exists(self):
self._publish_transportation_form()
self._submit_transport_instance()
# get export list url
num_exports = Export.objects.count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
self.client.get(export_list_url)
self.assertEqual(Export.objects.count(), num_exports + 1)
def test_dont_auto_export_if_exports_exist(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
self.client.post(create_export_url)
num_exports = Export.objects.count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
self.client.get(export_list_url)
self.assertEqual(Export.objects.count(), num_exports)
def test_last_submission_time_on_export(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
generate_export(
Export.XLS_EXPORT, 'xls', self.user.username, self.xform.id_string)
num_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
# check that our function knows there are no more submissions
self.assertFalse(
Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
sleep(1)
# force new last submission date on xform
last_submission = self.xform.instances.order_by('-date_created')[0]
last_submission.date_created += datetime.timedelta(hours=1)
last_submission.save()
# check that our function knows data has changed
self.assertTrue(
Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
# check that requesting list url will generate a new export
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.XLS_EXPORT
})
self.client.get(export_list_url)
self.assertEqual(
Export.objects.filter(xform=self.xform,
export_type=Export.XLS_EXPORT).count(),
num_exports + 1)
# make sure another export type causes auto-generation
num_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.CSV_EXPORT
})
self.client.get(export_list_url)
self.assertEqual(
Export.objects.filter(xform=self.xform,
export_type=Export.CSV_EXPORT).count(),
num_exports + 1)
def test_last_submission_time_empty(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create export
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
# set time of last submission to None
export.time_of_last_submission = None
export.save()
self.assertTrue(Export.exports_outdated(xform=self.xform,
export_type=Export.XLS_EXPORT))
def test_invalid_export_type(self):
self._publish_transportation_form()
self._submit_transport_instance()
export_list_url = reverse(export_list, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'invalid'
})
response = self.client.get(export_list_url)
self.assertEqual(response.status_code, 400)
# test create url
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'invalid'
})
response = self.client.post(create_export_url)
self.assertEqual(response.status_code, 400)
def test_add_index_to_filename(self):
filename = "file_name-123f.txt"
new_filename = increment_index_in_filename(filename)
expected_filename = "file_name-123f-1.txt"
self.assertEqual(new_filename, expected_filename)
# test file that already has an index
filename = "file_name-123.txt"
new_filename = increment_index_in_filename(filename)
expected_filename = "file_name-124.txt"
self.assertEqual(new_filename, expected_filename)
def test_duplicate_export_filename_is_renamed(self):
self._publish_transportation_form()
self._submit_transport_instance()
# TODO: mock the time
        # only works if the time at which we generate the basename matches, to
        # the second, the time at which the 2nd export is created
# create an export object in the db
basename = "%s_%s" % (
self.xform.id_string,
datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S"))
filename = basename + ".csv"
export = Export.objects.create(
xform=self.xform, export_type=Export.CSV_EXPORT, filename=filename)
# 2nd export
export_2 = generate_export(
Export.CSV_EXPORT, 'csv', self.user.username, self.xform.id_string)
if export.created_on.timetuple() == export_2.created_on.timetuple():
new_filename = increment_index_in_filename(filename)
self.assertEqual(new_filename, export_2.filename)
else:
self.skipTest("duplicate export filename test skipped "
"because export times differ.")
def test_export_download_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
csv_export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.CSV_EXPORT,
"filename": export.filename
})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# test xls
export = generate_export(Export.XLS_EXPORT, 'xls', self.user.username,
self.xform.id_string)
xls_export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.XLS_EXPORT,
"filename": export.filename
})
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
def test_404_on_export_io_error(self):
"""
Test that we return a 404 when the response_with_mimetype_and_name
encounters an IOError
"""
self._publish_transportation_form()
self._submit_transport_instance()
export = generate_export(Export.CSV_EXPORT, 'csv', self.user.username,
self.xform.id_string)
export_url = reverse(export_download, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.CSV_EXPORT,
"filename": export.filename
})
# delete the export
export.delete()
# access the export
response = self.client.get(export_url)
self.assertEqual(response.status_code, 404)
def test_deleted_submission_not_in_export(self):
self._publish_transportation_form()
initial_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self._submit_transport_instance(0)
self._submit_transport_instance(1)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count + 2)
# get id of second submission
instance_id = Instance.objects.filter(
xform=self.xform).order_by('id').reverse()[0].id
delete_url = reverse(
delete_data, kwargs={"username": self.user.username,
"id_string": self.xform.id_string})
params = {'id': instance_id}
self.client.post(delete_url, params)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count + 1)
# create the export
csv_export_url = reverse(
'csv_export', kwargs={"username": self.user.username,
"id_string": self.xform.id_string})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
f = StringIO.StringIO(self._get_response_content(response))
csv_reader = csv.reader(f)
num_rows = len([row for row in csv_reader])
f.close()
        # number of rows == initial_count + 2, i.e. the header plus the one
        # remaining data row
self.assertEqual(num_rows, initial_count + 2)
def test_edited_submissions_in_exports(self):
self._publish_transportation_form()
initial_count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
instance_name = 'transport_2011-07-25_19-05-36'
path = _main_fixture_path(instance_name)
self._make_submission(path)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count + 1)
# make edited submission - simulating what enketo would return
instance_name = 'transport_2011-07-25_19-05-36-edited'
path = _main_fixture_path(instance_name)
self._make_submission(path)
count = ParsedInstance.query_mongo(
self.user.username, self.xform.id_string, '{}', '[]', '{}',
count=True)[0]['count']
self.assertEqual(count, initial_count + 1)
# create the export
csv_export_url = reverse(
'csv_export', kwargs={"username": self.user.username,
"id_string": self.xform.id_string})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
f = StringIO.StringIO(self._get_response_content(response))
csv_reader = csv.DictReader(f)
data = [row for row in csv_reader]
f.close()
num_rows = len(data)
# number of rows == initial_count + 1
self.assertEqual(num_rows, initial_count + 1)
key = 'transport/loop_over_transport_types_frequency/ambulance/'\
'frequency_to_referral_facility'
self.assertEqual(data[initial_count][key], "monthly")
def test_export_ids_dont_have_comma_separation(self):
"""
        It seems that using {{ }} to output numbers greater than 1000 formats
        the number with a thousands separator
"""
self._publish_transportation_form()
self._submit_transport_instance()
# create an in-complete export
export = Export.objects.create(id=1234, xform=self.xform,
export_type=Export.XLS_EXPORT)
self.assertEqual(export.pk, 1234)
export_list_url = reverse(
export_list, kwargs={
"username": self.user.username,
"id_string": self.xform.id_string,
"export_type": Export.XLS_EXPORT
})
response = self.client.get(export_list_url)
self.assertContains(response, '#delete-1234"')
self.assertNotContains(response, '#delete-1,234"')
def test_export_progress_updates(self):
"""
        Test that after generate_export is called the export's state is set to
        pending, and once it is complete it is set to complete; if we fail
        between the two updates, it is marked as failed
"""
self._publish_transportation_form()
# generate an export that fails because of the NoRecordsFound exception
export = Export.objects.create(xform=self.xform,
export_type=Export.XLS_EXPORT)
# check that progress url says pending
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], False)
self.assertEqual(status["filename"], None)
export.internal_status = Export.FAILED
export.save()
# check that progress url says failed
progress_url = reverse(export_progress, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], True)
self.assertEqual(status["filename"], None)
# make a submission and create a valid export
self._submit_transport_instance()
create_xls_export(
self.user.username,
self.xform.id_string, export.id)
params = {'export_ids': [export.id]}
response = self.client.get(progress_url, params)
status = json.loads(response.content)[0]
self.assertEqual(status["complete"], True)
self.assertIsNotNone(status["filename"])
    def test_direct_export_returns_newest_export_if_not_updated_since(self):
self._publish_transportation_form()
self._submit_transport_instance()
self.assertEqual(self.response.status_code, 201)
sleep(1)
self._submit_transport_instance_w_uuid("transport_2011-07-25_19-05-36")
self.assertEqual(self.response.status_code, 201)
initial_num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
initial_num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
# request a direct csv export
csv_export_url = reverse('csv_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
xls_export_url = reverse('xls_export', kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have initial_num_exports + 1 exports
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 1)
# request another export without changing the data
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should still only have a single export object
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 1)
# this should not affect a direct XLS export
# and XLS should still re-generate
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
self.assertEqual(num_xls_exports, initial_num_xls_exports + 1)
        # make sure xls doesn't re-generate if data hasn't changed
response = self.client.get(xls_export_url)
self.assertEqual(response.status_code, 200)
num_xls_exports = Export.objects.filter(
xform=self.xform, export_type=Export.XLS_EXPORT).count()
self.assertEqual(num_xls_exports, initial_num_xls_exports + 1)
sleep(1)
# check that data edits cause a re-generation
self._submit_transport_instance_w_uuid(
"transport_2011-07-25_19-05-36-edited")
self.assertEqual(self.response.status_code, 201)
self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have an extra export now that the data has been updated
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 2)
sleep(1)
# and when we delete
delete_url = reverse(delete_data, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string
})
instance = Instance.objects.filter().order_by('-pk')[0]
response = self.client.post(delete_url, {'id': instance.id})
self.assertEqual(response.status_code, 200)
response = self.client.get(csv_export_url)
self.assertEqual(response.status_code, 200)
# we should have an extra export now that the data
# has been updated by the delete
num_csv_exports = Export.objects.filter(
xform=self.xform, export_type=Export.CSV_EXPORT).count()
self.assertEqual(num_csv_exports, initial_num_csv_exports + 3)
def test_exports_outdated_doesnt_consider_failed_exports(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create a bad export
export = Export.objects.create(
xform=self.xform, export_type=Export.XLS_EXPORT,
internal_status=Export.FAILED)
self.assertTrue(
Export.exports_outdated(self.xform, export.export_type))
def test_exports_outdated_considers_pending_exports(self):
self._publish_transportation_form()
self._submit_transport_instance()
# create a pending export
export = Export.objects.create(
xform=self.xform, export_type=Export.XLS_EXPORT,
internal_status=Export.PENDING)
self.assertFalse(
Export.exports_outdated(self.xform, export.export_type))
def _get_csv_data(self, filepath):
storage = get_storage_class()()
csv_file = storage.open(filepath)
reader = csv.DictReader(csv_file)
data = reader.next()
csv_file.close()
return data
def _get_xls_data(self, filepath):
storage = get_storage_class()()
with storage.open(filepath) as f:
workbook = open_workbook(file_contents=f.read())
transportation_sheet = workbook.sheet_by_name("transportation")
self.assertTrue(transportation_sheet.nrows > 1)
headers = transportation_sheet.row_values(0)
column1 = transportation_sheet.row_values(1)
return dict(zip(headers, column1))
def test_column_header_delimiter_export_option(self):
self._publish_transportation_form()
# survey 1 has ambulance and bicycle as values for
# transport/available_transportation_types_to_referral_facility
self._submit_transport_instance(survey_at=1)
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
})
default_params = {}
custom_params = {
'options[group_delimiter]': '.',
}
# test csv with default group delimiter
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
self.assertTrue(AMBULANCE_KEY in data)
self.assertEqual(data[AMBULANCE_KEY], 'True')
sleep(1)
# test csv with dot delimiter
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
self.assertTrue(AMBULANCE_KEY_DOTS in data)
self.assertEqual(data[AMBULANCE_KEY_DOTS], 'True')
# test xls with default group delimiter
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
self.assertTrue(AMBULANCE_KEY in data)
# xlrd reader seems to convert bools into integers i.e. 0 or 1
self.assertEqual(data[AMBULANCE_KEY], 1)
sleep(1)
# test xls with dot delimiter
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
self.assertTrue(AMBULANCE_KEY_DOTS in data)
# xlrd reader seems to convert bools into integers i.e. 0 or 1
self.assertEqual(data[AMBULANCE_KEY_DOTS], 1)
def test_split_select_multiple_export_option(self):
self._publish_transportation_form()
self._submit_transport_instance(survey_at=1)
create_csv_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'csv'
})
default_params = {}
custom_params = {
'options[dont_split_select_multiples]': 'yes'
}
# test csv with default split select multiples
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
# we should have transport/available_transportation_types_to_referral_f
# acility/ambulance as a separate column
self.assertTrue(AMBULANCE_KEY in data)
self.assertEqual(data[AMBULANCE_KEY], 'True')
sleep(1)
# test csv with default split select multiples, binary select multiples
settings.BINARY_SELECT_MULTIPLES = True
response = self.client.post(create_csv_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
# we should have transport/available_transportation_types_to_referral_f
# acility/ambulance as a separate column
self.assertTrue(AMBULANCE_KEY in data)
self.assertEqual(data[AMBULANCE_KEY], '1')
settings.BINARY_SELECT_MULTIPLES = False
sleep(1)
# test csv without default split select multiples
response = self.client.post(create_csv_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='csv').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_csv_data(export.filepath)
# transport/available_transportation_types_to_referral_facility/ambulan
# ce should not be in its own column
self.assertFalse(AMBULANCE_KEY in data)
# transport/available_transportation_types_to_referral_facility should
# be a column
self.assertTrue(
'transport/available_transportation_types_to_referral_facility' in
data)
        # check that ambulance is one of the values within the transport/available
# _transportation_types_to_referral_facility column
self.assertTrue("ambulance" in data[
'transport/available_transportation_types_to_referral_facility'
].split(" "))
create_xls_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': 'xls'
})
# test xls with default split select multiples
response = self.client.post(create_xls_export_url, default_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
# we should have transport/available_transportation_types_to_referral_f
# acility/ambulance as a separate column
self.assertTrue(AMBULANCE_KEY in data)
sleep(1)
# test xls without default split select multiples
response = self.client.post(create_xls_export_url, custom_params)
self.assertEqual(response.status_code, 302)
export = Export.objects.filter(
xform=self.xform, export_type='xls').latest('created_on')
self.assertTrue(bool(export.filepath))
data = self._get_xls_data(export.filepath)
# transport/available_transportation_types_to_referral_facility/ambulan
# ce should NOT be in its own column
self.assertFalse(AMBULANCE_KEY in data)
# transport/available_transportation_types_to_referral_facility should
# be a column
self.assertTrue(
'transport/available_transportation_types_to_referral_facility'
in data)
        # check that ambulance is one of the values within the transport/available
# _transportation_types_to_referral_facility column
self.assertTrue("ambulance" in data[
'transport/available_transportation_types_to_referral_facility'
].split(" "))
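    # Summary of the two CSV/XLS layouts exercised above (column names are
    # indicative only and come from the assertions in this test, not from the
    # export implementation itself):
    #
    #   split (default):
    #     transport/available_transportation_types_to_referral_facility/ambulance
    #     holds 'True'/'False', or '1'/'0' when settings.BINARY_SELECT_MULTIPLES
    #     is enabled.
    #   not split (options[dont_split_select_multiples]=yes):
    #     transport/available_transportation_types_to_referral_facility holds
    #     the space-separated selected choices, e.g. "ambulance bicycle".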
def test_dict_to_joined_export_works(self):
data =\
{
'name': 'Abe',
'age': '35',
'_geolocation': [None, None],
'attachments': ['abcd.jpg', 'efgh.jpg'],
'children':
[
{
'children/name': 'Mike',
'children/age': '5',
'children/cartoons':
[
{
'children/cartoons/name': 'Tom & Jerry',
'children/cartoons/why': 'Tom is silly',
},
{
'children/cartoons/name': 'Flinstones',
'children/cartoons/why':
u"I like bamb bam\u0107",
}
]
},
{
'children/name': 'John',
'children/age': '2',
'children/cartoons': []
},
{
'children/name': 'Imora',
'children/age': '3',
'children/cartoons':
[
{
'children/cartoons/name': 'Shrek',
'children/cartoons/why': 'He\'s so funny'
},
{
'children/cartoons/name': 'Dexter\'s Lab',
'children/cartoons/why': 'He thinks hes smart',
'children/cartoons/characters':
[
{
'children/cartoons/characters/name':
'Dee Dee',
'children/cartoons/characters/good_or_'
'evil': 'good'
},
{
'children/cartoons/characters/name':
'Dexter',
'children/cartoons/characters/good_or_'
'evil': 'evil'
},
]
}
]
}
]
}
expected_output =\
{
'survey': {
'name': 'Abe',
'age': '35'
},
'children':
[
{
'children/name': 'Mike',
'children/age': '5',
'_index': 1,
'_parent_table_name': 'survey',
'_parent_index': 1
},
{
'children/name': 'John',
'children/age': '2',
'_index': 2,
'_parent_table_name': 'survey',
'_parent_index': 1
},
{
'children/name': 'Imora',
'children/age': '3',
'_index': 3,
'_parent_table_name': 'survey',
'_parent_index': 1
},
],
'children/cartoons':
[
{
'children/cartoons/name': 'Tom & Jerry',
'children/cartoons/why': 'Tom is silly',
'_index': 1,
'_parent_table_name': 'children',
'_parent_index': 1
},
{
'children/cartoons/name': 'Flinstones',
'children/cartoons/why': u"I like bamb bam\u0107",
'_index': 2,
'_parent_table_name': 'children',
'_parent_index': 1
},
{
'children/cartoons/name': 'Shrek',
'children/cartoons/why': 'He\'s so funny',
'_index': 3,
'_parent_table_name': 'children',
'_parent_index': 3
},
{
'children/cartoons/name': 'Dexter\'s Lab',
'children/cartoons/why': 'He thinks hes smart',
'_index': 4,
'_parent_table_name': 'children',
'_parent_index': 3
}
],
'children/cartoons/characters':
[
{
'children/cartoons/characters/name': 'Dee Dee',
'children/cartoons/characters/good_or_evil': 'good',
'_index': 1,
'_parent_table_name': 'children/cartoons',
'_parent_index': 4
},
{
'children/cartoons/characters/name': 'Dexter',
'children/cartoons/characters/good_or_evil': 'evil',
'_index': 2,
'_parent_table_name': 'children/cartoons',
'_parent_index': 4
}
]
}
survey_name = 'survey'
indices = {survey_name: 0}
output = dict_to_joined_export(data, 1, indices, survey_name)
self.assertEqual(output[survey_name], expected_output[survey_name])
# 1st level
self.assertEqual(len(output['children']), 3)
for child in enumerate(['Mike', 'John', 'Imora']):
index = child[0]
name = child[1]
self.assertEqual(
filter(
lambda x: x['children/name'] == name,
output['children'])[0],
expected_output['children'][index])
# 2nd level
self.assertEqual(len(output['children/cartoons']), 4)
for cartoon in enumerate(
['Tom & Jerry', 'Flinstones', 'Shrek', 'Dexter\'s Lab']):
index = cartoon[0]
name = cartoon[1]
self.assertEqual(
filter(
lambda x: x['children/cartoons/name'] == name,
output['children/cartoons'])[0],
expected_output['children/cartoons'][index])
# 3rd level
self.assertEqual(len(output['children/cartoons/characters']), 2)
for characters in enumerate(['Dee Dee', 'Dexter']):
index = characters[0]
name = characters[1]
self.assertEqual(
filter(
lambda x: x['children/cartoons/characters/name'] == name,
output['children/cartoons/characters'])[0],
expected_output['children/cartoons/characters'][index])
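    # How the flattened repeat tables checked above relate to one another:
    # each repeat row carries `_index` (its 1-based position in its own
    # table) plus `_parent_table_name` and `_parent_index`, so for nested
    # repeats a child row joins back to its parent with
    #   parent = output[row['_parent_table_name']][row['_parent_index'] - 1]
    # e.g. the 'Dee Dee' character row has _parent_index 4, pointing at the
    # fourth 'children/cartoons' row ("Dexter's Lab").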
def test_generate_csv_zip_export(self):
# publish xls form
self._publish_transportation_form_and_submit_instance()
# create export db object
export = generate_export(
Export.CSV_ZIP_EXPORT, "zip", self.user.username,
self.xform.id_string, group_delimiter='/',
split_select_multiples=True)
storage = get_storage_class()()
self.assertTrue(storage.exists(export.filepath))
path, ext = os.path.splitext(export.filename)
self.assertEqual(ext, '.zip')
def test_dict_to_joined_export_notes(self):
submission = {
"_id": 579828,
"_submission_time": "2013-07-03T08:26:10",
"_uuid": "5b4752eb-e13c-483e-87cb-e67ca6bb61e5",
"_bamboo_dataset_id": "",
"_xform_id_string": "test_data_types",
"_userform_id": "larryweya_test_data_types",
"_status": "submitted_via_web",
"_notes": [
{
"note": "Note 1",
"date_created": "2013-07-03T08:26:10",
"id": 356,
"date_modified": "2013-07-03T08:26:10"
},
{
"note": "Note 2",
"date_created": "2013-07-03T08:34:40",
"id": 357,
"date_modified": "2013-07-03T08:34:40"
},
{
"note": "Note 3",
"date_created": "2013-07-03T08:56:14",
"id": 361,
"date_modified": "2013-07-03T08:56:14"
}
],
"meta/instanceID": "uuid:5b4752eb-e13c-483e-87cb-e67ca6bb61e5",
"formhub/uuid": "633ec390e024411ba5ce634db7807e62",
"amount": "",
}
survey_name = 'tutorial'
indices = {survey_name: 0}
data = dict_to_joined_export(submission, 1, indices, survey_name)
expected_data = {
'tutorial': {
'_id': 579828,
'_submission_time': '2013-07-03T08:26:10',
'_uuid': '5b4752eb-e13c-483e-87cb-e67ca6bb61e5',
'_bamboo_dataset_id': '',
'amount': '',
'_xform_id_string': 'test_data_types',
'_userform_id': 'larryweya_test_data_types',
'_status': 'submitted_via_web',
'_notes': 'Note 1\nNote 2\nNote 3',
'meta/instanceID': 'uuid:5b4752eb-e13c-483e-87cb-e67ca6bb61e5',
'formhub/uuid': '633ec390e024411ba5ce634db7807e62'
}
}
self.assertEqual(sorted(data), sorted(expected_data))
def test_create_xls_export_non_existent_id(self):
self._publish_transportation_form()
# make a submission and create a valid export
self._submit_transport_instance()
non_existent_id = 42
result = create_xls_export(
self.user.username,
self.xform.id_string, non_existent_id)
self.assertEqual(result, None)
def test_create_external_export_url(self):
self._publish_transportation_form()
self._submit_transport_instance()
num_exports = Export.objects.count()
server = 'http://localhost:8080/xls/23fa4c38c0054748a984ffd89021a295'
data_value = 'template 1 |{0}'.format(server)
meta = MetaData.external_export(self.xform, data_value)
custom_params = {
'meta': meta.id,
}
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT
})
response = self.client.post(create_export_url, custom_params)
self.assertEqual(response.status_code, 302)
self.assertEqual(Export.objects.count(), num_exports + 1)
def test_create_external_export_without_template(self):
self._publish_transportation_form()
self._submit_transport_instance()
num_exports = Export.objects.count()
# create export
create_export_url = reverse(create_export, kwargs={
'username': self.user.username,
'id_string': self.xform.id_string,
'export_type': Export.EXTERNAL_EXPORT
})
response = self.client.post(create_export_url)
self.assertEqual(response.status_code, 403)
self.assertEquals(response.content, u'No XLS Template set.')
self.assertEqual(Export.objects.count(), num_exports)
|
|
# License: BSD 3 clause
import numpy as np
from . import Solver
from tick.base_model import Model
from tick.prox.base import Prox
__author__ = 'Stephane Gaiffas'
class SolverFirstOrder(Solver):
"""The base class for a first order solver. It defines methods for
setting a model (giving first order information) and a proximal
operator
    It only deals with printing verbose information and setting parameters
Parameters
----------
step : `float` default=None
Step-size of the algorithm
tol : `float`, default=0
The tolerance of the solver (iterations stop when the stopping
criterion is below it). By default the solver does ``max_iter``
iterations
max_iter : `int`
Maximum number of iterations of the solver
verbose : `bool`, default=True
        If `True`, the solver prints information about its progress, otherwise
        it prints nothing (but records information in history anyway)
print_every : `int`, default = 10
Print history information every time the iteration number is a
multiple of ``print_every``
record_every : `int`, default = 1
        Information along iterations is recorded in history each time the
        iteration number is a multiple of ``record_every``
Attributes
----------
model : `Model`
The model to solve
prox : `Prox`
Proximal operator to solve
dtype : `{'float64', 'float32'}`, default='float64'
Type of the arrays used. This value is set from model and prox dtypes.
Notes
-----
This class should not be used by end-users
"""
_attrinfos = {
"model": {
"writable": False
},
"prox": {
"writable": False
},
"_initial_n_calls_loss_and_grad": {
"writable": False
},
"_initial_n_calls_loss": {
"writable": False
},
"_initial_n_calls_grad": {
"writable": False
},
"_initial_n_passes_over_data": {
"writable": False
},
}
def __init__(self, step: float = None, tol: float = 0.,
max_iter: int = 100, verbose: bool = True,
print_every: int = 10, record_every: int = 1):
self.dtype = None
Solver.__init__(self, tol, max_iter, verbose, print_every,
record_every)
self.model = None
self.prox = None
self.step = step
# Martin's complicated and useless stuff :)
self._initial_n_calls_loss_and_grad = 0
self._initial_n_calls_loss = 0
self._initial_n_calls_grad = 0
self._initial_n_passes_over_data = 0
def validate_model(self, model: Model):
if not isinstance(model, Model):
raise ValueError('Passed object of class %s is not a '
'Model class' % model.name)
if not model._fitted:
raise ValueError('Passed object %s has not been fitted. You must '
'call ``fit`` on it before passing it to '
'``set_model``' % model.name)
def set_model(self, model: Model):
"""Set model in the solver
Parameters
----------
model : `Model`
            Sets the model in the solver. The model gives the first
            order information (loss, gradient, among other things)
            used by the solver
Returns
-------
output : `Solver`
The same instance with given model
"""
self.validate_model(model)
self.dtype = model.dtype
self._set("model", model)
return self
def _initialize_values(self, x0: np.ndarray = None, step: float = None,
n_empty_vectors: int = 0):
"""Initialize values
Parameters
----------
x0 : `numpy.ndarray`
Starting point
step : `float`
Initial step
n_empty_vectors : `int`
            Number of empty vectors shaped like x0 that are needed
Returns
-------
step : `float`
Initial step
obj : `float`
Initial value of objective function
iterate : `numpy.ndarray`
copy of starting point
empty vectors : `numpy.ndarray`
n_empty_vectors empty vectors shaped as x0. For example, those
vectors can be used to store previous iterate values during
a solver execution.
"""
# Initialization
if step is None:
if self.step is None:
raise ValueError("No step specified.")
else:
step = self.step
else:
self.step = step
if x0 is None:
x0 = np.zeros(self.model.n_coeffs, dtype=self.dtype)
iterate = x0.copy()
obj = self.objective(iterate)
result = [step, obj, iterate]
for _ in range(n_empty_vectors):
result.append(np.zeros_like(x0))
return tuple(result)
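    # Typical unpacking of the returned tuple in a concrete solver, assuming
    # two extra work vectors are requested (variable names are illustrative
    # only):
    #
    #   step, obj, iterate, prev_iterate, grad_buffer = \
    #       self._initialize_values(x0, step, n_empty_vectors=2)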
def set_prox(self, prox: Prox):
"""Set proximal operator in the solver
Parameters
----------
prox : `Prox`
The proximal operator of the penalization function
Returns
-------
output : `Solver`
The solver with given prox
Notes
-----
In some solvers, ``set_model`` must be called before
        ``set_prox``, otherwise an error might be raised
"""
if not isinstance(prox, Prox):
raise ValueError('Passed object of class %s is not a '
'Prox class' % prox.name)
if self.dtype is None or self.model is None:
raise ValueError("Solver must call set_model before set_prox")
if prox.dtype != self.dtype:
prox = prox.astype(self.dtype)
self._set("prox", prox)
return self
def astype(self, dtype_or_object_with_dtype):
if self.model is None:
raise ValueError("Cannot reassign solver without a model")
import tick.base.dtype_to_cpp_type
new_solver = tick.base.dtype_to_cpp_type.copy_with(
self,
["prox", "model"] # ignore on deepcopy
)
new_solver.dtype = tick.base.dtype_to_cpp_type.extract_dtype(
dtype_or_object_with_dtype)
new_solver.set_model(self.model.astype(new_solver.dtype))
if self.prox is not None:
new_solver.set_prox(self.prox.astype(new_solver.dtype))
return new_solver
def _as_dict(self):
dd = Solver._as_dict(self)
if self.model is not None:
dd["model"] = self.model._as_dict()
if self.prox is not None:
dd["prox"] = self.prox._as_dict()
return dd
def objective(self, coeffs, loss: float = None):
"""Compute the objective function
Parameters
----------
coeffs : `np.array`, shape=(n_coeffs,)
Point where the objective is computed
loss : `float`, default=`None`
            Gives the value of the loss if already known (this avoids
            recomputing it in some cases)
Returns
-------
output : `float`
Value of the objective at given ``coeffs``
"""
if self.prox is None:
prox_value = 0
else:
prox_value = self.prox.value(coeffs)
if loss is None:
return self.model.loss(coeffs) + prox_value
else:
return loss + prox_value
def solve(self, x0=None, step=None):
"""
Launch the solver
Parameters
----------
x0 : `np.array`, shape=(n_coeffs,), default=`None`
Starting point of the solver
step : `float`, default=`None`
            Step-size or learning rate for the solver. This can also be tuned
            using the ``step`` attribute
Returns
-------
output : `np.array`, shape=(n_coeffs,)
Obtained minimizer for the problem, same as ``solution`` attribute
"""
        if x0 is not None and self.dtype != "float64":
x0 = x0.astype(self.dtype)
if self.model is None:
raise ValueError('You must first set the model using '
'``set_model``.')
if self.prox is None:
raise ValueError('You must first set the prox using '
'``set_prox``.')
solution = Solver.solve(self, x0, step)
return solution
def _handle_history(self, n_iter: int, force: bool = False, **kwargs):
"""Updates the history of the solver.
Parameters
----------
Notes
-----
This should not be used by end-users.
"""
# self.model.n_calls_loss_and_grad is shared by all
# solvers using this model
# hence it might not be at 0 while starting
# /!\ beware if parallel computing...
if n_iter == 1:
self._set("_initial_n_calls_loss_and_grad",
self.model.n_calls_loss_and_grad)
self._set("_initial_n_calls_loss", self.model.n_calls_loss)
self._set("_initial_n_calls_grad", self.model.n_calls_grad)
self._set("_initial_n_passes_over_data",
self.model.n_passes_over_data)
n_calls_loss_and_grad = \
self.model.n_calls_loss_and_grad - \
self._initial_n_calls_loss_and_grad
n_calls_loss = \
self.model.n_calls_loss - self._initial_n_calls_loss
n_calls_grad = \
self.model.n_calls_grad - self._initial_n_calls_grad
n_passes_over_data = \
self.model.n_passes_over_data - \
self._initial_n_passes_over_data
Solver.\
_handle_history(self, n_iter, force=force,
n_calls_loss_and_grad=n_calls_loss_and_grad,
n_calls_loss=n_calls_loss,
n_calls_grad=n_calls_grad,
n_passes_over_data=n_passes_over_data,
**kwargs)
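# A toy, numpy-only sketch of the model/prox split that ``SolverFirstOrder``
# manages: the "model" supplies the loss and its gradient, the "prox" supplies
# the penalization, and the objective is loss(coeffs) + prox_value(coeffs).
# This is only an illustration of the pattern (plain ISTA on a made-up
# least-squares problem); it does not use tick's real Model/Prox classes.
def _proximal_gradient_sketch(n_iter=200, step=0.1, strength=0.05):
    rng = np.random.RandomState(0)
    A, b = rng.randn(20, 3), rng.randn(20)
    loss = lambda w: 0.5 * np.sum((A.dot(w) - b) ** 2) / len(b)
    grad = lambda w: A.T.dot(A.dot(w) - b) / len(b)
    prox_value = lambda w: strength * np.sum(np.abs(w))
    prox = lambda w, s: np.sign(w) * np.maximum(np.abs(w) - strength * s, 0.)
    w = np.zeros(A.shape[1])
    for _ in range(n_iter):
        w = prox(w - step * grad(w), step)  # gradient step, then proximal step
    return w, loss(w) + prox_value(w)  # final iterate and its objective value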
|
|
# -*- coding: utf-8 -*-
import time
import logging
import functools
import httplib as http
import bleach
from flask import request
from modularodm import Q
from framework.auth.decorators import collect_auth
from framework.auth.decorators import must_be_logged_in
from website.models import Node
from website.models import User
from website.search import util
from website.util import api_url_for
from website.search import exceptions
from website.search import share_search
import website.search.search as search
from framework.exceptions import HTTPError
from website.search.exceptions import IndexNotFoundError
from website.search.exceptions import MalformedQueryError
from website.search.util import build_query
from website.project.views.contributor import get_node_contributors_abbrev
logger = logging.getLogger(__name__)
RESULTS_PER_PAGE = 250
def handle_search_errors(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except exceptions.MalformedQueryError:
raise HTTPError(http.BAD_REQUEST, data={
'message_short': 'Bad search query',
'message_long': ('Please check our help (the question mark beside the search box) for more information '
'on advanced search queries.'),
})
except exceptions.SearchUnavailableError:
raise HTTPError(http.SERVICE_UNAVAILABLE, data={
'message_short': 'Search unavailable',
                'message_long': ('Our search service is currently unavailable. If the issue persists, '
'please report it to <a href="mailto:[email protected]">[email protected]</a>.'),
})
return wrapped
@handle_search_errors
def search_search(**kwargs):
_type = kwargs.get('type', None)
tick = time.time()
results = {}
if request.method == 'POST':
results = search.search(request.get_json(), doc_type=_type)
elif request.method == 'GET':
q = request.args.get('q', '*')
# TODO Match javascript params?
start = request.args.get('from', '0')
size = request.args.get('size', '10')
results = search.search(build_query(q, start, size), doc_type=_type)
results['time'] = round(time.time() - tick, 2)
return results
def conditionally_add_query_item(query, item, condition):
""" Helper for the search_projects_by_title function which will add a condition to a query
    Raises an HTTPError (400 Bad Request) if ``condition`` is not one of 'yes', 'no' or 'either'.
:param query: The modular ODM query that you want to modify
:param item: the field to query on
:param condition: yes, no, or either
:return: the modified query
"""
condition = condition.lower()
if condition == "yes":
return query & Q(item, 'eq', True)
elif condition == "no":
return query & Q(item, 'eq', False)
elif condition == "either":
return query
raise HTTPError(http.BAD_REQUEST)
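# Example composition using the helper above, with made-up values and the same
# modular ODM ``Q`` syntax used elsewhere in this module:
#
#   query = Q('title', 'icontains', 'climate') & Q('category', 'eq', 'project')
#   query = conditionally_add_query_item(query, 'is_deleted', 'no')
#   query = conditionally_add_query_item(query, 'is_registration', 'either')
#   nodes = Node.find(query).limit(10)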
@must_be_logged_in
def search_projects_by_title(**kwargs):
""" Search for nodes by title. Can pass in arguments from the URL to modify the search
:arg term: The substring of the title.
:arg category: Category of the node.
:arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg isRegistration: yes, no, or either. Either will not add a qualifier for that argument in the search.
:arg includePublic: yes or no. Whether the projects listed should include public projects.
:arg includeContributed: yes or no. Whether the search should include projects the current user has
contributed to.
:arg ignoreNode: a list of nodes that should not be included in the search.
:return: a list of dictionaries of projects
"""
# TODO(fabianvf): At some point, it would be nice to do this with elastic search
user = kwargs['auth'].user
term = request.args.get('term', '')
max_results = int(request.args.get('maxResults', '10'))
category = request.args.get('category', 'project').lower()
is_deleted = request.args.get('isDeleted', 'no').lower()
is_folder = request.args.get('isFolder', 'no').lower()
is_registration = request.args.get('isRegistration', 'no').lower()
include_public = request.args.get('includePublic', 'yes').lower()
include_contributed = request.args.get('includeContributed', 'yes').lower()
ignore_nodes = request.args.getlist('ignoreNode', [])
matching_title = (
Q('title', 'icontains', term) & # search term (case insensitive)
Q('category', 'eq', category) # is a project
)
matching_title = conditionally_add_query_item(matching_title, 'is_deleted', is_deleted)
matching_title = conditionally_add_query_item(matching_title, 'is_folder', is_folder)
matching_title = conditionally_add_query_item(matching_title, 'is_registration', is_registration)
if len(ignore_nodes) > 0:
for node_id in ignore_nodes:
matching_title = matching_title & Q('_id', 'ne', node_id)
my_projects = []
my_project_count = 0
public_projects = []
if include_contributed == "yes":
my_projects = Node.find(
matching_title &
Q('contributors', 'contains', user._id) # user is a contributor
).limit(max_results)
        my_project_count = my_projects.count()
if my_project_count < max_results and include_public == "yes":
public_projects = Node.find(
matching_title &
Q('is_public', 'eq', True) # is public
).limit(max_results - my_project_count)
results = list(my_projects) + list(public_projects)
ret = process_project_search_results(results, **kwargs)
return ret
@must_be_logged_in
def process_project_search_results(results, **kwargs):
"""
:param results: list of projects from the modular ODM search
    :return: the entire search result as a list of dictionaries, one per
        project, each including an HTML summary of its contributors.
"""
user = kwargs['auth'].user
ret = []
for project in results:
authors = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
authors_html = ''
for author in authors['contributors']:
a = User.load(author['user_id'])
authors_html += '<a href="%s">%s</a>' % (a.url, a.fullname)
authors_html += author['separator'] + ' '
authors_html += ' ' + authors['others_count']
ret.append({
'id': project._id,
'label': project.title,
'value': project.title,
'category': 'My Projects' if user in project.contributors else 'Public Projects',
'authors': authors_html,
})
return ret
@collect_auth
def search_contributor(auth):
user = auth.user if auth else None
nid = request.args.get('excludeNode')
exclude = Node.load(nid).contributors if nid else []
query = bleach.clean(request.args.get('query', ''), tags=[], strip=True)
page = int(bleach.clean(request.args.get('page', '0'), tags=[], strip=True))
size = int(bleach.clean(request.args.get('size', '5'), tags=[], strip=True))
return search.search_contributor(query=query, page=page, size=size,
exclude=exclude, current_user=user)
@handle_search_errors
def search_share():
tick = time.time()
results = {}
count = request.args.get('count') is not None
raw = request.args.get('raw') is not None
if request.method == 'POST':
query = request.get_json()
elif request.method == 'GET':
query = build_query(
request.args.get('q', '*'),
request.args.get('from', 0),
request.args.get('size', 10),
sort=request.args.get('sort')
)
if count:
results = search.count_share(query)
else:
results = search.search_share(query, raw)
results['time'] = round(time.time() - tick, 2)
return results
@handle_search_errors
def search_share_stats():
q = request.args.get('q')
query = build_query(q, 0, 0) if q else {}
return search.share_stats(query=query)
@handle_search_errors
def search_share_atom(**kwargs):
q = request.args.get('q', '*')
sort = request.args.get('sort', 'dateUpdated')
# we want the results per page to be constant between pages
# TODO - move this functionality into build_query in util
start = util.compute_start(request.args.get('page', 1), RESULTS_PER_PAGE)
query = build_query(q, size=RESULTS_PER_PAGE, start=start, sort=sort)
try:
search_results = search.search_share(query)
except MalformedQueryError:
raise HTTPError(http.BAD_REQUEST)
except IndexNotFoundError:
search_results = {
'count': 0,
'results': []
}
atom_url = api_url_for('search_share_atom', _xml=True, _absolute=True)
return util.create_atom_feed(
name='SHARE',
data=search_results['results'],
query=q,
size=RESULTS_PER_PAGE,
start=start,
url=atom_url,
to_atom=share_search.to_atom
)
def search_share_providers():
return search.share_providers()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.eager import backprop
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import multinomial
from tensorflow.python.platform import test
class MultinomialTest(test.TestCase):
def setUp(self):
self._rng = np.random.RandomState(42)
def testSimpleShapes(self):
with self.cached_session():
p = [.1, .3, .6]
dist = multinomial.Multinomial(total_count=1., probs=p)
self.assertEqual(3, dist.event_shape_tensor().eval())
self.assertAllEqual([], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([3]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([]), dist.batch_shape)
def testComplexShapes(self):
with self.cached_session():
p = 0.5 * np.ones([3, 2, 2], dtype=np.float32)
n = [[3., 2], [4, 5], [6, 7]]
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual(2, dist.event_shape_tensor().eval())
self.assertAllEqual([3, 2], dist.batch_shape_tensor().eval())
self.assertEqual(tensor_shape.TensorShape([2]), dist.event_shape)
self.assertEqual(tensor_shape.TensorShape([3, 2]), dist.batch_shape)
def testN(self):
p = [[0.1, 0.2, 0.7], [0.2, 0.3, 0.5]]
n = [[3.], [4]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p)
self.assertEqual((2, 1), dist.total_count.get_shape())
self.assertAllClose(n, dist.total_count.eval())
def testP(self):
p = [[0.1, 0.2, 0.7]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=3., probs=p)
self.assertEqual((1, 3), dist.probs.get_shape())
self.assertEqual((1, 3), dist.logits.get_shape())
self.assertAllClose(p, dist.probs.eval())
def testLogits(self):
p = np.array([[0.1, 0.2, 0.7]], dtype=np.float32)
logits = np.log(p) - 50.
with self.cached_session():
multinom = multinomial.Multinomial(total_count=3., logits=logits)
self.assertEqual((1, 3), multinom.probs.get_shape())
self.assertEqual((1, 3), multinom.logits.get_shape())
self.assertAllClose(p, multinom.probs.eval())
self.assertAllClose(logits, multinom.logits.eval())
def testPmfUnderflow(self):
logits = np.array([[-200, 0]], dtype=np.float32)
with self.cached_session():
dist = multinomial.Multinomial(total_count=1., logits=logits)
lp = dist.log_prob([1., 0.]).eval()[0]
self.assertAllClose(-200, lp, atol=0, rtol=1e-6)
def testPmfandCountsAgree(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
dist = multinomial.Multinomial(total_count=n, probs=p, validate_args=True)
dist.prob([2., 3, 0]).eval()
dist.prob([3., 0, 2]).eval()
with self.assertRaisesOpError("must be non-negative"):
dist.prob([-1., 4, 2]).eval()
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
dist.prob([3., 3, 0]).eval()
def testPmfNonIntegerCounts(self):
p = [[0.1, 0.2, 0.7]]
n = [[5.]]
with self.cached_session():
# No errors with integer n.
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=True)
multinom.prob([2., 1, 2]).eval()
multinom.prob([3., 0, 2]).eval()
# Counts don't sum to n.
with self.assertRaisesOpError("counts must sum to `self.total_count`"):
multinom.prob([2., 3, 2]).eval()
# Counts are non-integers.
x = array_ops.placeholder(dtypes.float32)
with self.assertRaisesOpError(
"cannot contain fractional components."):
multinom.prob(x).eval(feed_dict={x: [1.0, 2.5, 1.5]})
multinom = multinomial.Multinomial(
total_count=n, probs=p, validate_args=False)
multinom.prob([1., 2., 2.]).eval()
# Non-integer arguments work.
multinom.prob([1.0, 2.5, 1.5]).eval()
def testPmfBothZeroBatches(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.5, 0.5]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(0.5, pmf.eval())
self.assertEqual((), pmf.get_shape())
def testPmfBothZeroBatchesNontrivialN(self):
with self.cached_session():
# Both zero-batches. No broadcast
p = [0.1, 0.9]
counts = [3., 2]
dist = multinomial.Multinomial(total_count=5., probs=p)
pmf = dist.prob(counts)
# 5 choose 3 = 5 choose 2 = 10. 10 * (.9)^2 * (.1)^3 = 81/10000.
self.assertAllClose(81. / 10000, pmf.eval())
self.assertEqual((), pmf.get_shape())
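  # Worked form of the value asserted above: the multinomial pmf is
  #   n! / (prod_i counts_i!) * prod_i p_i ** counts_i
  # so with n=5, counts=[3, 2], p=[0.1, 0.9]:
  #   5!/(3!*2!) * 0.1**3 * 0.9**2 = 10 * 0.001 * 0.81 = 0.0081 = 81/10000.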
def testPmfPStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9]]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfPStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [[1., 0], [0, 1]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose([0.1, 0.9], pmf.eval())
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenSameRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [[1., 0]]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual((2), pmf.get_shape())
def testPmfCountsStretchedInBroadcastWhenLowerRank(self):
with self.cached_session():
p = [[0.1, 0.9], [0.7, 0.3]]
counts = [1., 0]
pmf = multinomial.Multinomial(total_count=1., probs=p).prob(counts)
self.assertAllClose(pmf.eval(), [0.1, 0.7])
self.assertEqual(pmf.get_shape(), (2))
def testPmfShapeCountsStretchedN(self):
with self.cached_session():
# [2, 2, 2]
p = [[[0.1, 0.9], [0.1, 0.9]], [[0.7, 0.3], [0.7, 0.3]]]
# [2, 2]
n = [[3., 3], [3, 3]]
# [2]
counts = [2., 1]
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.assertEqual(pmf.get_shape(), (2, 2))
def testPmfShapeCountsPStretchedN(self):
with self.cached_session():
p = [0.1, 0.9]
counts = [3., 2]
n = np.full([4, 3], 5., dtype=np.float32)
pmf = multinomial.Multinomial(total_count=n, probs=p).prob(counts)
pmf.eval()
self.assertEqual((4, 3), pmf.get_shape())
def testMultinomialMean(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_means = 5 * np.array(p, dtype=np.float32)
self.assertEqual((3,), dist.mean().get_shape())
self.assertAllClose(expected_means, dist.mean().eval())
def testMultinomialCovariance(self):
with self.cached_session():
n = 5.
p = [0.1, 0.2, 0.7]
dist = multinomial.Multinomial(total_count=n, probs=p)
expected_covariances = [[9. / 20, -1 / 10, -7 / 20],
[-1 / 10, 4 / 5, -7 / 10],
[-7 / 20, -7 / 10, 21 / 20]]
self.assertEqual((3, 3), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
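  # The expected matrix above follows from the multinomial covariance
  #   Cov[X_i, X_j] = n * (p_i * delta_ij - p_i * p_j),
  # e.g. Var[X_1] = 5 * 0.1 * 0.9 = 9/20 and Cov[X_1, X_2] = -5 * 0.1 * 0.2 = -1/10.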
def testMultinomialCovarianceBatch(self):
with self.cached_session():
# Shape [2]
n = [5.] * 2
# Shape [4, 1, 2]
p = [[[0.1, 0.9]], [[0.1, 0.9]]] * 2
dist = multinomial.Multinomial(total_count=n, probs=p)
# Shape [2, 2]
inner_var = [[9. / 20, -9 / 20], [-9 / 20, 9 / 20]]
# Shape [4, 2, 2, 2]
expected_covariances = [[inner_var, inner_var]] * 4
self.assertEqual((4, 2, 2, 2), dist.covariance().get_shape())
self.assertAllClose(expected_covariances, dist.covariance().eval())
def testCovarianceMultidimensional(self):
# Shape [3, 5, 4]
p = np.random.dirichlet([.25, .25, .25, .25], [3, 5]).astype(np.float32)
# Shape [6, 3, 3]
p2 = np.random.dirichlet([.3, .3, .4], [6, 3]).astype(np.float32)
ns = np.random.randint(low=1, high=11, size=[3, 5]).astype(np.float32)
ns2 = np.random.randint(low=1, high=11, size=[6, 1]).astype(np.float32)
with self.cached_session():
dist = multinomial.Multinomial(ns, p)
dist2 = multinomial.Multinomial(ns2, p2)
covariance = dist.covariance()
covariance2 = dist2.covariance()
self.assertEqual((3, 5, 4, 4), covariance.get_shape())
self.assertEqual((6, 3, 3, 3), covariance2.get_shape())
def testCovarianceFromSampling(self):
    # We will test mean, cov, var, stddev on a Multinomial constructed
    # via broadcast between theta, n.
theta = np.array([[1., 2, 3],
[2.5, 4, 0.01]], dtype=np.float32)
theta /= np.sum(theta, 1)[..., array_ops.newaxis]
n = np.array([[10., 9.], [8., 7.], [6., 5.]], dtype=np.float32)
with self.cached_session() as sess:
# batch_shape=[3, 2], event_shape=[3]
dist = multinomial.Multinomial(n, theta)
x = dist.sample(int(1000e3), seed=1)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean[array_ops.newaxis, ...]
sample_cov = math_ops.reduce_mean(math_ops.matmul(
x_centered[..., array_ops.newaxis],
x_centered[..., array_ops.newaxis, :]), 0)
sample_var = array_ops.matrix_diag_part(sample_cov)
sample_stddev = math_ops.sqrt(sample_var)
[
sample_mean_,
sample_cov_,
sample_var_,
sample_stddev_,
analytic_mean,
analytic_cov,
analytic_var,
analytic_stddev,
] = sess.run([
sample_mean,
sample_cov,
sample_var,
sample_stddev,
dist.mean(),
dist.covariance(),
dist.variance(),
dist.stddev(),
])
self.assertAllClose(sample_mean_, analytic_mean, atol=0.01, rtol=0.01)
self.assertAllClose(sample_cov_, analytic_cov, atol=0.01, rtol=0.01)
self.assertAllClose(sample_var_, analytic_var, atol=0.01, rtol=0.01)
self.assertAllClose(sample_stddev_, analytic_stddev, atol=0.01, rtol=0.01)
def testSampleUnbiasedNonScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=[7., 6., 5.],
logits=math_ops.log(2. * self._rng.rand(4, 3, 2).astype(np.float32)))
n = int(3e4)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
# Cyclically rotate event dims left.
x_centered = array_ops.transpose(x - sample_mean, [1, 2, 3, 0])
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_b=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4, 3, 2], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 3, 2, 2], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testSampleUnbiasedScalarBatch(self):
with self.cached_session() as sess:
dist = multinomial.Multinomial(
total_count=5.,
logits=math_ops.log(2. * self._rng.rand(4).astype(np.float32)))
n = int(5e3)
x = dist.sample(n, seed=0)
sample_mean = math_ops.reduce_mean(x, 0)
x_centered = x - sample_mean # Already transposed to [n, 2].
sample_covariance = math_ops.matmul(
x_centered, x_centered, adjoint_a=True) / n
[
sample_mean_,
sample_covariance_,
actual_mean_,
actual_covariance_,
] = sess.run([
sample_mean,
sample_covariance,
dist.mean(),
dist.covariance(),
])
self.assertAllEqual([4], sample_mean.get_shape())
self.assertAllClose(actual_mean_, sample_mean_, atol=0., rtol=0.10)
self.assertAllEqual([4, 4], sample_covariance.get_shape())
self.assertAllClose(
actual_covariance_, sample_covariance_, atol=0., rtol=0.20)
def testNotReparameterized(self):
total_count = constant_op.constant(5.0)
p = constant_op.constant([0.2, 0.6])
with backprop.GradientTape() as tape:
tape.watch(total_count)
tape.watch(p)
dist = multinomial.Multinomial(
total_count=total_count,
probs=p)
samples = dist.sample(100)
grad_total_count, grad_p = tape.gradient(samples, [total_count, p])
self.assertIsNone(grad_total_count)
self.assertIsNone(grad_p)
if __name__ == "__main__":
test.main()
|
|
# -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <[email protected]>
# Olivier Grisel <[email protected]>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_is_fitted
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(ClassifierMixin, BaseEstimator):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : str or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
        The centroid for the samples corresponding to each class is the point
        from which the sum of the distances (according to the metric) of all
        samples that belong to that particular class is minimized.
        If the "manhattan" metric is provided, this centroid is the median;
        for all other metrics, the centroid is set to be the mean.
.. versionchanged:: 0.19
``metric='precomputed'`` was deprecated and now raises an error
shrink_threshold : float, default=None
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like of shape (n_classes, n_features)
Centroid of each class.
classes_ : array of shape (n_classes,)
The unique classes labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.neighbors import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid()
>>> print(clf.predict([[-0.8, -1]]))
[1]
See Also
--------
KNeighborsClassifier : Nearest neighbors classifier.
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
def __init__(self, metric='euclidean', *, shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array-like of shape (n_samples,)
Target values (integers)
"""
if self.metric == 'precomputed':
raise ValueError("Precomputed is not supported.")
        # If X is sparse and the metric is "manhattan", store it in CSC
        # format, which makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = self._validate_data(X, y, accept_sparse=['csc'])
else:
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
        # Number of samples in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
if np.all(np.ptp(X, axis=0) == 0):
raise ValueError("All features have zero variance. "
"Division by zero.")
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) - (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
np.clip(deviation, 0, None, out=deviation)
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
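            # In the shrunken-centroid formulation of Tibshirani et al. (2002)
            # this computes mean_j + m_k * s_j * d'_kj, where
            #   d_kj = (centroid_kj - mean_j) / (m_k * s_j) and
            #   d'_kj = sign(d_kj) * max(|d_kj| - shrink_threshold, 0),
            # so features whose deviation falls entirely below the threshold
            # stop contributing to the classification rule.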
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse='csr', reset=False)
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
|
|
import sys
import unittest
from test import support
from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
INVALID_UNDERSCORE_LITERALS)
L = [
('0', 0),
('1', 1),
('9', 9),
('10', 10),
('99', 99),
('100', 100),
('314', 314),
(' 314', 314),
('314 ', 314),
(' \t\t 314 \t\t ', 314),
(repr(sys.maxsize), sys.maxsize),
(' 1x', ValueError),
(' 1 ', 1),
(' 1\02 ', ValueError),
('', ValueError),
(' ', ValueError),
(' \t\t ', ValueError),
("\u0200", ValueError)
]
class IntSubclass(int):
pass
class IntTestCases(unittest.TestCase):
def test_basic(self):
self.assertEqual(int(314), 314)
self.assertEqual(int(3.14), 3)
# Check that conversion from float truncates towards zero
self.assertEqual(int(-3.14), -3)
self.assertEqual(int(3.9), 3)
self.assertEqual(int(-3.9), -3)
self.assertEqual(int(3.5), 3)
self.assertEqual(int(-3.5), -3)
self.assertEqual(int("-3"), -3)
self.assertEqual(int(" -3 "), -3)
self.assertEqual(int("\N{EM SPACE}-3\N{EN SPACE}"), -3)
# Different base:
self.assertEqual(int("10",16), 16)
# Test conversion from strings and various anomalies
for s, v in L:
for sign in "", "+", "-":
for prefix in "", " ", "\t", " \t\t ":
ss = prefix + sign + s
vv = v
if sign == "-" and v is not ValueError:
vv = -v
try:
self.assertEqual(int(ss), vv)
except ValueError:
pass
s = repr(-1-sys.maxsize)
x = int(s)
self.assertEqual(x+1, -sys.maxsize)
self.assertIsInstance(x, int)
# should return int
self.assertEqual(int(s[1:]), sys.maxsize+1)
# should return int
x = int(1e100)
self.assertIsInstance(x, int)
x = int(-1e100)
self.assertIsInstance(x, int)
# SF bug 434186: 0x80000000/2 != 0x80000000>>1.
# Worked by accident in Windows release build, but failed in debug build.
# Failed in all Linux builds.
x = -1-sys.maxsize
self.assertEqual(x >> 1, x//2)
x = int('1' * 600)
self.assertIsInstance(x, int)
self.assertRaises(TypeError, int, 1, 12)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 16), 291)
# Bug 1679: "0x" is not a valid hex literal
self.assertRaises(ValueError, int, "0x", 16)
self.assertRaises(ValueError, int, "0x", 0)
self.assertRaises(ValueError, int, "0o", 8)
self.assertRaises(ValueError, int, "0o", 0)
self.assertRaises(ValueError, int, "0b", 2)
self.assertRaises(ValueError, int, "0b", 0)
# SF bug 1334662: int(string, base) wrong answers
# Various representations of 2**32 evaluated to 0
# rather than 2**32 in previous versions
self.assertEqual(int('100000000000000000000000000000000', 2), 4294967296)
self.assertEqual(int('102002022201221111211', 3), 4294967296)
self.assertEqual(int('10000000000000000', 4), 4294967296)
self.assertEqual(int('32244002423141', 5), 4294967296)
self.assertEqual(int('1550104015504', 6), 4294967296)
self.assertEqual(int('211301422354', 7), 4294967296)
self.assertEqual(int('40000000000', 8), 4294967296)
self.assertEqual(int('12068657454', 9), 4294967296)
self.assertEqual(int('4294967296', 10), 4294967296)
self.assertEqual(int('1904440554', 11), 4294967296)
self.assertEqual(int('9ba461594', 12), 4294967296)
self.assertEqual(int('535a79889', 13), 4294967296)
self.assertEqual(int('2ca5b7464', 14), 4294967296)
self.assertEqual(int('1a20dcd81', 15), 4294967296)
self.assertEqual(int('100000000', 16), 4294967296)
self.assertEqual(int('a7ffda91', 17), 4294967296)
self.assertEqual(int('704he7g4', 18), 4294967296)
self.assertEqual(int('4f5aff66', 19), 4294967296)
self.assertEqual(int('3723ai4g', 20), 4294967296)
self.assertEqual(int('281d55i4', 21), 4294967296)
self.assertEqual(int('1fj8b184', 22), 4294967296)
self.assertEqual(int('1606k7ic', 23), 4294967296)
self.assertEqual(int('mb994ag', 24), 4294967296)
self.assertEqual(int('hek2mgl', 25), 4294967296)
self.assertEqual(int('dnchbnm', 26), 4294967296)
self.assertEqual(int('b28jpdm', 27), 4294967296)
self.assertEqual(int('8pfgih4', 28), 4294967296)
self.assertEqual(int('76beigg', 29), 4294967296)
self.assertEqual(int('5qmcpqg', 30), 4294967296)
self.assertEqual(int('4q0jto4', 31), 4294967296)
self.assertEqual(int('4000000', 32), 4294967296)
self.assertEqual(int('3aokq94', 33), 4294967296)
self.assertEqual(int('2qhxjli', 34), 4294967296)
self.assertEqual(int('2br45qb', 35), 4294967296)
self.assertEqual(int('1z141z4', 36), 4294967296)
# tests with base 0
# this fails on 3.0, but in 2.x the old octal syntax is allowed
self.assertEqual(int(' 0o123 ', 0), 83)
self.assertEqual(int('000', 0), 0)
self.assertEqual(int('0o123', 0), 83)
self.assertEqual(int('0x123', 0), 291)
self.assertEqual(int('0b100', 0), 4)
self.assertEqual(int(' 0O123 ', 0), 83)
self.assertEqual(int(' 0X123 ', 0), 291)
self.assertEqual(int(' 0B100 ', 0), 4)
# without base still base 10
self.assertEqual(int('0123'), 123)
self.assertEqual(int('0123', 10), 123)
# tests with prefix and base != 0
self.assertEqual(int('0x123', 16), 291)
self.assertEqual(int('0o123', 8), 83)
self.assertEqual(int('0b100', 2), 4)
self.assertEqual(int('0X123', 16), 291)
self.assertEqual(int('0O123', 8), 83)
self.assertEqual(int('0B100', 2), 4)
# the code has special checks for the first character after the
# type prefix
self.assertRaises(ValueError, int, '0b2', 2)
self.assertRaises(ValueError, int, '0b02', 2)
self.assertRaises(ValueError, int, '0B2', 2)
self.assertRaises(ValueError, int, '0B02', 2)
self.assertRaises(ValueError, int, '0o8', 8)
self.assertRaises(ValueError, int, '0o08', 8)
self.assertRaises(ValueError, int, '0O8', 8)
self.assertRaises(ValueError, int, '0O08', 8)
self.assertRaises(ValueError, int, '0xg', 16)
self.assertRaises(ValueError, int, '0x0g', 16)
self.assertRaises(ValueError, int, '0Xg', 16)
self.assertRaises(ValueError, int, '0X0g', 16)
# SF bug 1334662: int(string, base) wrong answers
# Checks for proper evaluation of 2**32 + 1
self.assertEqual(int('100000000000000000000000000000001', 2), 4294967297)
self.assertEqual(int('102002022201221111212', 3), 4294967297)
self.assertEqual(int('10000000000000001', 4), 4294967297)
self.assertEqual(int('32244002423142', 5), 4294967297)
self.assertEqual(int('1550104015505', 6), 4294967297)
self.assertEqual(int('211301422355', 7), 4294967297)
self.assertEqual(int('40000000001', 8), 4294967297)
self.assertEqual(int('12068657455', 9), 4294967297)
self.assertEqual(int('4294967297', 10), 4294967297)
self.assertEqual(int('1904440555', 11), 4294967297)
self.assertEqual(int('9ba461595', 12), 4294967297)
self.assertEqual(int('535a7988a', 13), 4294967297)
self.assertEqual(int('2ca5b7465', 14), 4294967297)
self.assertEqual(int('1a20dcd82', 15), 4294967297)
self.assertEqual(int('100000001', 16), 4294967297)
self.assertEqual(int('a7ffda92', 17), 4294967297)
self.assertEqual(int('704he7g5', 18), 4294967297)
self.assertEqual(int('4f5aff67', 19), 4294967297)
self.assertEqual(int('3723ai4h', 20), 4294967297)
self.assertEqual(int('281d55i5', 21), 4294967297)
self.assertEqual(int('1fj8b185', 22), 4294967297)
self.assertEqual(int('1606k7id', 23), 4294967297)
self.assertEqual(int('mb994ah', 24), 4294967297)
self.assertEqual(int('hek2mgm', 25), 4294967297)
self.assertEqual(int('dnchbnn', 26), 4294967297)
self.assertEqual(int('b28jpdn', 27), 4294967297)
self.assertEqual(int('8pfgih5', 28), 4294967297)
self.assertEqual(int('76beigh', 29), 4294967297)
self.assertEqual(int('5qmcpqh', 30), 4294967297)
self.assertEqual(int('4q0jto5', 31), 4294967297)
self.assertEqual(int('4000001', 32), 4294967297)
self.assertEqual(int('3aokq95', 33), 4294967297)
self.assertEqual(int('2qhxjlj', 34), 4294967297)
self.assertEqual(int('2br45qc', 35), 4294967297)
self.assertEqual(int('1z141z5', 36), 4294967297)
def test_underscores(self):
for lit in VALID_UNDERSCORE_LITERALS:
if any(ch in lit for ch in '.eEjJ'):
continue
self.assertEqual(int(lit, 0), eval(lit))
self.assertEqual(int(lit, 0), int(lit.replace('_', ''), 0))
for lit in INVALID_UNDERSCORE_LITERALS:
if any(ch in lit for ch in '.eEjJ'):
continue
self.assertRaises(ValueError, int, lit, 0)
# Additional test cases with bases != 0, only for the constructor:
self.assertEqual(int("1_00", 3), 9)
self.assertEqual(int("0_100"), 100) # not valid as a literal!
self.assertEqual(int(b"1_00"), 100) # byte underscore
self.assertRaises(ValueError, int, "_100")
self.assertRaises(ValueError, int, "+_100")
self.assertRaises(ValueError, int, "1__00")
self.assertRaises(ValueError, int, "100_")
@support.cpython_only
def test_small_ints(self):
# Bug #3236: Return small longs from PyLong_FromString
self.assertIs(int('10'), 10)
self.assertIs(int('-1'), -1)
self.assertIs(int(b'10'), 10)
self.assertIs(int(b'-1'), -1)
def test_no_args(self):
self.assertEqual(int(), 0)
def test_keyword_args(self):
# Test invoking int() using keyword arguments.
self.assertEqual(int('100', base=2), 4)
with self.assertRaisesRegex(TypeError, 'keyword argument'):
int(x=1.2)
with self.assertRaisesRegex(TypeError, 'keyword argument'):
int(x='100', base=2)
self.assertRaises(TypeError, int, base=10)
self.assertRaises(TypeError, int, base=0)
def test_int_base_limits(self):
"""Testing the supported limits of the int() base parameter."""
self.assertEqual(int('0', 5), 0)
with self.assertRaises(ValueError):
int('0', 1)
with self.assertRaises(ValueError):
int('0', 37)
with self.assertRaises(ValueError):
int('0', -909) # An old magic value base from Python 2.
with self.assertRaises(ValueError):
int('0', base=0-(2**234))
with self.assertRaises(ValueError):
int('0', base=2**234)
# Bases 2 through 36 are supported.
for base in range(2,37):
self.assertEqual(int('0', base=base), 0)
def test_int_base_bad_types(self):
"""Not integer types are not valid bases; issue16772."""
with self.assertRaises(TypeError):
int('0', 5.5)
with self.assertRaises(TypeError):
int('0', 5.0)
def test_int_base_indexable(self):
class MyIndexable(object):
def __init__(self, value):
self.value = value
def __index__(self):
return self.value
# Check out of range bases.
for base in 2**100, -2**100, 1, 37:
with self.assertRaises(ValueError):
int('43', base)
# Check in-range bases.
self.assertEqual(int('101', base=MyIndexable(2)), 5)
self.assertEqual(int('101', base=MyIndexable(10)), 101)
self.assertEqual(int('101', base=MyIndexable(36)), 1 + 36**2)
def test_non_numeric_input_types(self):
# Test possible non-numeric types for the argument x, including
# subclasses of the explicitly documented accepted types.
class CustomStr(str): pass
class CustomBytes(bytes): pass
class CustomByteArray(bytearray): pass
factories = [
bytes,
bytearray,
lambda b: CustomStr(b.decode()),
CustomBytes,
CustomByteArray,
memoryview,
]
try:
from array import array
except ImportError:
pass
else:
factories.append(lambda b: array('B', b))
for f in factories:
x = f(b'100')
with self.subTest(type(x)):
self.assertEqual(int(x), 100)
if isinstance(x, (str, bytes, bytearray)):
self.assertEqual(int(x, 2), 4)
else:
msg = "can't convert non-string"
with self.assertRaisesRegex(TypeError, msg):
int(x, 2)
with self.assertRaisesRegex(ValueError, 'invalid literal'):
int(f(b'A' * 0x10))
def test_int_memoryview(self):
self.assertEqual(int(memoryview(b'123')[1:3]), 23)
self.assertEqual(int(memoryview(b'123\x00')[1:3]), 23)
self.assertEqual(int(memoryview(b'123 ')[1:3]), 23)
self.assertEqual(int(memoryview(b'123A')[1:3]), 23)
self.assertEqual(int(memoryview(b'1234')[1:3]), 23)
def test_string_float(self):
self.assertRaises(ValueError, int, '1.2')
def test_intconversion(self):
# Test __int__()
class ClassicMissingMethods:
pass
self.assertRaises(TypeError, int, ClassicMissingMethods())
class MissingMethods(object):
pass
self.assertRaises(TypeError, int, MissingMethods())
class Foo0:
def __int__(self):
return 42
self.assertEqual(int(Foo0()), 42)
class Classic:
pass
for base in (object, Classic):
class IntOverridesTrunc(base):
def __int__(self):
return 42
def __trunc__(self):
return -12
self.assertEqual(int(IntOverridesTrunc()), 42)
class JustTrunc(base):
def __trunc__(self):
return 42
self.assertEqual(int(JustTrunc()), 42)
class ExceptionalTrunc(base):
def __trunc__(self):
1 / 0
with self.assertRaises(ZeroDivisionError):
int(ExceptionalTrunc())
for trunc_result_base in (object, Classic):
class Index(trunc_result_base):
def __index__(self):
return 42
class TruncReturnsNonInt(base):
def __trunc__(self):
return Index()
self.assertEqual(int(TruncReturnsNonInt()), 42)
class Intable(trunc_result_base):
def __int__(self):
return 42
class TruncReturnsNonIndex(base):
def __trunc__(self):
return Intable()
                self.assertEqual(int(TruncReturnsNonIndex()), 42)
class NonIntegral(trunc_result_base):
def __trunc__(self):
# Check that we avoid infinite recursion.
return NonIntegral()
class TruncReturnsNonIntegral(base):
def __trunc__(self):
return NonIntegral()
try:
int(TruncReturnsNonIntegral())
except TypeError as e:
self.assertEqual(str(e),
"__trunc__ returned non-Integral"
" (type NonIntegral)")
else:
self.fail("Failed to raise TypeError with %s" %
((base, trunc_result_base),))
# Regression test for bugs.python.org/issue16060.
class BadInt(trunc_result_base):
def __int__(self):
return 42.0
class TruncReturnsBadInt(base):
def __trunc__(self):
return BadInt()
with self.assertRaises(TypeError):
int(TruncReturnsBadInt())
def test_int_subclass_with_index(self):
class MyIndex(int):
def __index__(self):
return 42
class BadIndex(int):
def __index__(self):
return 42.0
my_int = MyIndex(7)
self.assertEqual(my_int, 7)
self.assertEqual(int(my_int), 7)
self.assertEqual(int(BadIndex()), 0)
def test_int_subclass_with_int(self):
class MyInt(int):
def __int__(self):
return 42
class BadInt(int):
def __int__(self):
return 42.0
my_int = MyInt(7)
self.assertEqual(my_int, 7)
self.assertEqual(int(my_int), 42)
my_int = BadInt(7)
self.assertEqual(my_int, 7)
self.assertRaises(TypeError, int, my_int)
def test_int_returns_int_subclass(self):
class BadIndex:
def __index__(self):
return True
class BadIndex2(int):
def __index__(self):
return True
class BadInt:
def __int__(self):
return True
class BadInt2(int):
def __int__(self):
return True
class TruncReturnsBadIndex:
def __trunc__(self):
return BadIndex()
class TruncReturnsBadInt:
def __trunc__(self):
return BadInt()
class TruncReturnsIntSubclass:
def __trunc__(self):
return True
bad_int = BadIndex()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = BadIndex2()
n = int(bad_int)
self.assertEqual(n, 0)
self.assertIs(type(n), int)
bad_int = BadInt()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = BadInt2()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = TruncReturnsBadIndex()
with self.assertWarns(DeprecationWarning):
n = int(bad_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
bad_int = TruncReturnsBadInt()
self.assertRaises(TypeError, int, bad_int)
good_int = TruncReturnsIntSubclass()
n = int(good_int)
self.assertEqual(n, 1)
self.assertIs(type(n), int)
n = IntSubclass(good_int)
self.assertEqual(n, 1)
self.assertIs(type(n), IntSubclass)
def test_error_message(self):
def check(s, base=None):
with self.assertRaises(ValueError,
msg="int(%r, %r)" % (s, base)) as cm:
if base is None:
int(s)
else:
int(s, base)
self.assertEqual(cm.exception.args[0],
"invalid literal for int() with base %d: %r" %
(10 if base is None else base, s))
check('\xbd')
check('123\xbd')
check(' 123 456 ')
check('123\x00')
# SF bug 1545497: embedded NULs were not detected with explicit base
check('123\x00', 10)
check('123\x00 245', 20)
check('123\x00 245', 16)
check('123\x00245', 20)
check('123\x00245', 16)
# byte string with embedded NUL
check(b'123\x00')
check(b'123\x00', 10)
# non-UTF-8 byte string
check(b'123\xbd')
check(b'123\xbd', 10)
# lone surrogate in Unicode string
check('123\ud800')
check('123\ud800', 10)
def test_issue31619(self):
self.assertEqual(int('1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1_0_1', 2),
0b1010101010101010101010101010101)
self.assertEqual(int('1_2_3_4_5_6_7_0_1_2_3', 8), 0o12345670123)
self.assertEqual(int('1_2_3_4_5_6_7_8_9', 16), 0x123456789)
self.assertEqual(int('1_2_3_4_5_6_7', 32), 1144132807)
if __name__ == "__main__":
unittest.main()
|
|
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic SSH power manager.
Provides basic power control of virtual machines via SSH.
For use in dev and test environments.
Currently supported environments are:
Virtual Box (vbox)
Virsh (virsh)
VMware (vmware)
Parallels (parallels)
"""
import os
from oslo.concurrency import processutils
from oslo.config import cfg
from ironic.common import boot_devices
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LE
from ironic.common.i18n import _LW
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers import base
from ironic.drivers import utils as driver_utils
from ironic.openstack.common import log as logging
libvirt_opts = [
cfg.StrOpt('libvirt_uri',
default='qemu:///system',
help='libvirt uri')
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, group='ssh')
LOG = logging.getLogger(__name__)
REQUIRED_PROPERTIES = {
'ssh_address': _("IP address or hostname of the node to ssh into. "
"Required."),
'ssh_username': _("username to authenticate as. Required."),
'ssh_virt_type': _("virtualization software to use; one of vbox, virsh, "
"vmware, parallels. Required.")
}
OTHER_PROPERTIES = {
'ssh_key_contents': _("private key(s). One of this, ssh_key_filename, "
"or ssh_password must be specified."),
'ssh_key_filename': _("(list of) filename(s) of optional private key(s) "
"for authentication. One of this, ssh_key_contents, "
"or ssh_password must be specified."),
'ssh_password': _("password to use for authentication or for unlocking a "
"private key. One of this, ssh_key_contents, or "
"ssh_key_filename must be specified."),
'ssh_port': _("port on the node to connect to; default is 22. Optional.")
}
COMMON_PROPERTIES = REQUIRED_PROPERTIES.copy()
COMMON_PROPERTIES.update(OTHER_PROPERTIES)
# NOTE(dguerri) Generic boot device map. Virtualisation types that don't define
# a more specific one will use this.
# This is left for compatibility with other modules and is still valid for
# virsh and vmware.
_BOOT_DEVICES_MAP = {
boot_devices.DISK: 'hd',
boot_devices.PXE: 'network',
boot_devices.CDROM: 'cdrom',
}
def _get_boot_device_map(virt_type):
if virt_type in ('virsh', 'vmware'):
return _BOOT_DEVICES_MAP
elif virt_type == 'vbox':
return {
boot_devices.DISK: 'disk',
boot_devices.PXE: 'net',
boot_devices.CDROM: 'dvd',
}
elif virt_type == 'parallels':
return {
boot_devices.DISK: 'hdd0',
boot_devices.PXE: 'net0',
boot_devices.CDROM: 'cdrom0',
}
else:
raise exception.InvalidParameterValue(_(
"SSHPowerDriver '%(virt_type)s' is not a valid virt_type.") %
{'virt_type': virt_type})
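# Illustrative mapping, derived directly from the dictionaries above: for
# 'vbox', _get_boot_device_map('vbox')[boot_devices.PXE] == 'net', while
# 'virsh' and 'vmware' fall back to the generic map, where PXE maps to
# 'network'.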
def _get_command_sets(virt_type):
if virt_type == 'vbox':
return {
'base_cmd': 'LC_ALL=C /usr/bin/VBoxManage',
'start_cmd': 'startvm {_NodeName_}',
'stop_cmd': 'controlvm {_NodeName_} poweroff',
'reboot_cmd': 'controlvm {_NodeName_} reset',
'list_all': "list vms|awk -F'\"' '{print $2}'",
'list_running': 'list runningvms',
'get_node_macs': ("showvminfo --machinereadable {_NodeName_} | "
"awk -F '\"' '/macaddress/{print $2}'"),
'set_boot_device': ('{_BaseCmd_} modifyvm {_NodeName_} '
'--boot1 {_BootDevice_}'),
'get_boot_device': ("{_BaseCmd_} showvminfo "
"--machinereadable {_NodeName_} | "
"awk -F '\"' '/boot1/{print $2}'"),
}
elif virt_type == 'vmware':
return {
'base_cmd': 'LC_ALL=C /bin/vim-cmd',
'start_cmd': 'vmsvc/power.on {_NodeName_}',
'stop_cmd': 'vmsvc/power.off {_NodeName_}',
'reboot_cmd': 'vmsvc/power.reboot {_NodeName_}',
'list_all': "vmsvc/getallvms | awk '$1 ~ /^[0-9]+$/ {print $1}'",
# NOTE(arata): In spite of its name, list_running_cmd shows a
# single vmid, not a list. But it is OK.
'list_running': (
"vmsvc/power.getstate {_NodeName_} | "
"grep 'Powered on' >/dev/null && "
"echo '\"{_NodeName_}\"' || true"),
# NOTE(arata): `true` is needed to handle a false vmid, which can
# be returned by list_cmd. In that case, get_node_macs
# returns an empty list rather than fails with
# non-zero status code.
'get_node_macs': (
"vmsvc/device.getdevices {_NodeName_} | "
"grep macAddress | awk -F '\"' '{print $2}' || true"),
}
elif virt_type == "virsh":
# NOTE(NobodyCam): changes to the virsh commands will impact CI
# see https://review.openstack.org/83906
# Change-Id: I160e4202952b7551b855dc7d91784d6a184cb0ed
# for more detail.
virsh_cmds = {
'base_cmd': 'LC_ALL=C /usr/bin/virsh',
'start_cmd': 'start {_NodeName_}',
'stop_cmd': 'destroy {_NodeName_}',
'reboot_cmd': 'reset {_NodeName_}',
'list_all': "list --all | tail -n +2 | awk -F\" \" '{print $2}'",
'list_running': ("list --all|grep running | "
"awk -v qc='\"' -F\" \" '{print qc$2qc}'"),
'get_node_macs': ("dumpxml {_NodeName_} | "
"awk -F \"'\" '/mac address/{print $2}'| tr -d ':'"),
'set_boot_device': ("EDITOR=\"sed -i '/<boot \(dev\|order\)=*\>/d;"
"/<\/os>/i\<boot dev=\\\"{_BootDevice_}\\\"/>'\" "
"{_BaseCmd_} edit {_NodeName_}"),
'get_boot_device': ("{_BaseCmd_} dumpxml {_NodeName_} | "
"awk '/boot dev=/ { gsub( \".*dev=\" Q, \"\" ); "
"gsub( Q \".*\", \"\" ); print; }' "
"Q=\"'\" RS=\"[<>]\" | "
"head -1"),
}
if CONF.ssh.libvirt_uri:
virsh_cmds['base_cmd'] += ' --connect %s' % CONF.ssh.libvirt_uri
return virsh_cmds
elif virt_type == 'parallels':
return {
'base_cmd': 'LC_ALL=C /usr/bin/prlctl',
'start_cmd': 'start {_NodeName_}',
'stop_cmd': 'stop {_NodeName_} --kill',
'reboot_cmd': 'reset {_NodeName_}',
'list_all': "list -a -o name |tail -n +2",
'list_running': 'list -o name |tail -n +2',
'get_node_macs': ("list -j -i \"{_NodeName_}\" | "
"awk -F'\"' '/\"mac\":/ {print $4}' | "
"sed 's/\\(..\\)\\(..\\)\\(..\\)\\(..\\)\\(..\\)\\(..\\)/"
"\\1:\\2:\\3:\\4:\\5\\6/' | "
"tr '[:upper:]' '[:lower:]'"),
'set_boot_device': ("{_BaseCmd_} set {_NodeName_} "
"--device-bootorder \"{_BootDevice_}\""),
'get_boot_device': ("{_BaseCmd_} list -i {_NodeName_} | "
"awk '/^Boot order:/ {print $3}'"),
}
else:
raise exception.InvalidParameterValue(_(
"SSHPowerDriver '%(virt_type)s' is not a valid virt_type, ") %
{'virt_type': virt_type})
def _normalize_mac(mac):
return mac.replace('-', '').replace(':', '').lower()
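# Illustrative behaviour: _normalize_mac('52:54:00-AB-CD-EF') -> '525400abcdef';
# both ':' and '-' separators are stripped and the result is lower-cased, which
# is what the substring comparison in _get_hosts_name_for_node relies on.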
def _get_boot_device(ssh_obj, driver_info):
"""Get the current boot device.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:raises: SSHCommandFailed on an error from ssh.
:raises: NotImplementedError if the virt_type does not support
getting the boot device.
"""
cmd_to_exec = driver_info['cmd_set'].get('get_boot_device')
if cmd_to_exec:
boot_device_map = _get_boot_device_map(driver_info['virt_type'])
node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
base_cmd = driver_info['cmd_set']['base_cmd']
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node_name)
cmd_to_exec = cmd_to_exec.replace('{_BaseCmd_}', base_cmd)
stdout, stderr = _ssh_execute(ssh_obj, cmd_to_exec)
return next((dev for dev, hdev in boot_device_map.items()
if hdev == stdout), None)
else:
raise NotImplementedError()
def _set_boot_device(ssh_obj, driver_info, device):
"""Set the boot device.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:param device: the boot device.
:raises: SSHCommandFailed on an error from ssh.
:raises: NotImplementedError if the virt_type does not support
setting the boot device.
"""
cmd_to_exec = driver_info['cmd_set'].get('set_boot_device')
if cmd_to_exec:
node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
base_cmd = driver_info['cmd_set']['base_cmd']
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node_name)
cmd_to_exec = cmd_to_exec.replace('{_BootDevice_}', device)
cmd_to_exec = cmd_to_exec.replace('{_BaseCmd_}', base_cmd)
_ssh_execute(ssh_obj, cmd_to_exec)
else:
raise NotImplementedError()
def _ssh_execute(ssh_obj, cmd_to_exec):
"""Executes a command via ssh.
Executes a command via ssh and returns a list of the lines of the
output from the command.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param cmd_to_exec: command to execute.
:returns: list of the lines of output from the command.
:raises: SSHCommandFailed on an error from ssh.
"""
try:
output_list = processutils.ssh_execute(ssh_obj,
cmd_to_exec)[0].split('\n')
except Exception as e:
LOG.debug("Cannot execute SSH cmd %(cmd)s. Reason: %(err)s."
% {'cmd': cmd_to_exec, 'err': e})
raise exception.SSHCommandFailed(cmd=cmd_to_exec)
return output_list
def _parse_driver_info(node):
"""Gets the information needed for accessing the node.
:param node: the Node of interest.
:returns: dictionary of information.
:raises: InvalidParameterValue if any required parameters are incorrect.
:raises: MissingParameterValue if any required parameters are missing.
"""
info = node.driver_info or {}
missing_info = [key for key in REQUIRED_PROPERTIES if not info.get(key)]
if missing_info:
raise exception.MissingParameterValue(_(
"SSHPowerDriver requires the following parameters to be set in "
"node's driver_info: %s.") % missing_info)
address = info.get('ssh_address')
username = info.get('ssh_username')
password = info.get('ssh_password')
try:
port = int(info.get('ssh_port', 22))
except ValueError:
raise exception.InvalidParameterValue(_(
"SSHPowerDriver requires ssh_port to be integer value"))
key_contents = info.get('ssh_key_contents')
key_filename = info.get('ssh_key_filename')
virt_type = info.get('ssh_virt_type')
# NOTE(deva): we map 'address' from API to 'host' for common utils
res = {
'host': address,
'username': username,
'port': port,
'virt_type': virt_type,
'uuid': node.uuid
}
cmd_set = _get_command_sets(virt_type)
res['cmd_set'] = cmd_set
# Only one credential may be set (avoids complexity around having
# precedence etc).
if len(filter(None, (password, key_filename, key_contents))) != 1:
raise exception.InvalidParameterValue(_(
"SSHPowerDriver requires one and only one of password, "
"key_contents and key_filename to be set."))
if password:
res['password'] = password
elif key_contents:
res['key_contents'] = key_contents
else:
if not os.path.isfile(key_filename):
raise exception.InvalidParameterValue(_(
"SSH key file %s not found.") % key_filename)
res['key_filename'] = key_filename
return res
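# Illustrative result (all concrete values below are hypothetical) for a virsh
# node that authenticates with a password:
# {'host': '192.0.2.10', 'username': 'stack', 'port': 22, 'virt_type': 'virsh',
#  'uuid': '<node uuid>', 'cmd_set': <virsh command dict>, 'password': '***'}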
def _get_power_status(ssh_obj, driver_info):
"""Returns a node's current power state.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:returns: one of ironic.common.states POWER_OFF, POWER_ON.
:raises: NodeNotFound
"""
power_state = None
cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['list_running'])
running_list = _ssh_execute(ssh_obj, cmd_to_exec)
# Command should return a list of running vms. If the current node is
# not listed then we can assume it is not powered on.
node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
if node_name:
for node in running_list:
if not node:
continue
if node_name in node:
power_state = states.POWER_ON
break
if not power_state:
power_state = states.POWER_OFF
else:
err_msg = _LE('Node "%(host)s" with MAC address %(mac)s not found.')
LOG.error(err_msg, {'host': driver_info['host'],
'mac': driver_info['macs']})
raise exception.NodeNotFound(node=driver_info['host'])
return power_state
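# Illustrative flow (domain name is hypothetical): for virsh, 'list_running'
# prints quoted domain names such as '"instance-0001"'; if the host-side name
# resolved for this node appears in any of those lines the state is POWER_ON,
# otherwise POWER_OFF.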
def _get_connection(node):
"""Returns an SSH client connected to a node.
:param node: the Node.
:returns: paramiko.SSHClient, an active ssh connection.
"""
return utils.ssh_connect(_parse_driver_info(node))
def _get_hosts_name_for_node(ssh_obj, driver_info):
"""Get the name the host uses to reference the node.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:returns: the name or None if not found.
"""
matched_name = None
cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['list_all'])
full_node_list = _ssh_execute(ssh_obj, cmd_to_exec)
LOG.debug("Retrieved Node List: %s" % repr(full_node_list))
# for each node check Mac Addresses
for node in full_node_list:
if not node:
continue
LOG.debug("Checking Node: %s's Mac address." % node)
cmd_to_exec = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['get_node_macs'])
cmd_to_exec = cmd_to_exec.replace('{_NodeName_}', node)
hosts_node_mac_list = _ssh_execute(ssh_obj, cmd_to_exec)
for host_mac in hosts_node_mac_list:
if not host_mac:
continue
for node_mac in driver_info['macs']:
if not node_mac:
continue
if _normalize_mac(host_mac) in _normalize_mac(node_mac):
LOG.debug("Found Mac address: %s" % node_mac)
matched_name = node
break
if matched_name:
break
if matched_name:
break
return matched_name
def _power_on(ssh_obj, driver_info):
"""Power ON this node.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:returns: one of ironic.common.states POWER_ON or ERROR.
"""
current_pstate = _get_power_status(ssh_obj, driver_info)
if current_pstate == states.POWER_ON:
_power_off(ssh_obj, driver_info)
node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
cmd_to_power_on = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['start_cmd'])
cmd_to_power_on = cmd_to_power_on.replace('{_NodeName_}', node_name)
_ssh_execute(ssh_obj, cmd_to_power_on)
current_pstate = _get_power_status(ssh_obj, driver_info)
if current_pstate == states.POWER_ON:
return current_pstate
else:
return states.ERROR
def _power_off(ssh_obj, driver_info):
"""Power OFF this node.
:param ssh_obj: paramiko.SSHClient, an active ssh connection.
:param driver_info: information for accessing the node.
:returns: one of ironic.common.states POWER_OFF or ERROR.
"""
current_pstate = _get_power_status(ssh_obj, driver_info)
if current_pstate == states.POWER_OFF:
return current_pstate
node_name = _get_hosts_name_for_node(ssh_obj, driver_info)
cmd_to_power_off = "%s %s" % (driver_info['cmd_set']['base_cmd'],
driver_info['cmd_set']['stop_cmd'])
cmd_to_power_off = cmd_to_power_off.replace('{_NodeName_}', node_name)
_ssh_execute(ssh_obj, cmd_to_power_off)
current_pstate = _get_power_status(ssh_obj, driver_info)
if current_pstate == states.POWER_OFF:
return current_pstate
else:
return states.ERROR
class SSHPower(base.PowerInterface):
"""SSH Power Interface.
This PowerInterface class provides a mechanism for controlling the power
state of virtual machines via SSH.
NOTE: This driver supports VirtualBox, Virsh, VMware and Parallels commands.
NOTE: This driver does not currently support multi-node operations.
"""
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that the node's 'driver_info' is valid.
Check that the node's 'driver_info' contains the requisite fields
and that an SSH connection to the node can be established.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if any connection parameters are
incorrect or if ssh failed to connect to the node.
:raises: MissingParameterValue if no ports are enrolled for the given
node.
"""
if not driver_utils.get_node_mac_addresses(task):
raise exception.MissingParameterValue(_("Node %s does not have "
"any port associated with it.") % task.node.uuid)
try:
_get_connection(task.node)
except exception.SSHConnectFailed as e:
raise exception.InvalidParameterValue(_("SSH connection cannot"
" be established: %s") % e)
def get_power_state(self, task):
"""Get the current power state of the task's node.
Poll the host for the current power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:returns: power state. One of :class:`ironic.common.states`.
:raises: InvalidParameterValue if any connection parameters are
incorrect.
:raises: MissingParameterValue when a required parameter is missing
:raises: NodeNotFound.
:raises: SSHCommandFailed on an error from ssh.
:raises: SSHConnectFailed if ssh failed to connect to the node.
"""
driver_info = _parse_driver_info(task.node)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(task.node)
return _get_power_status(ssh_obj, driver_info)
@task_manager.require_exclusive_lock
def set_power_state(self, task, pstate):
"""Turn the power on or off.
Set the power state of the task's node.
:param task: a TaskManager instance containing the node to act on.
:param pstate: Either POWER_ON or POWER_OFF from :class:
`ironic.common.states`.
:raises: InvalidParameterValue if any connection parameters are
incorrect, or if the desired power state is invalid.
:raises: MissingParameterValue when a required parameter is missing
:raises: NodeNotFound.
:raises: PowerStateFailure if it failed to set power state to pstate.
:raises: SSHCommandFailed on an error from ssh.
:raises: SSHConnectFailed if ssh failed to connect to the node.
"""
driver_info = _parse_driver_info(task.node)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(task.node)
if pstate == states.POWER_ON:
state = _power_on(ssh_obj, driver_info)
elif pstate == states.POWER_OFF:
state = _power_off(ssh_obj, driver_info)
else:
raise exception.InvalidParameterValue(_("set_power_state called "
"with invalid power state %s.") % pstate)
if state != pstate:
raise exception.PowerStateFailure(pstate=pstate)
@task_manager.require_exclusive_lock
def reboot(self, task):
"""Cycles the power to the task's node.
Power cycles a node.
:param task: a TaskManager instance containing the node to act on.
:raises: InvalidParameterValue if any connection parameters are
incorrect.
:raises: MissingParameterValue when a required parameter is missing
:raises: NodeNotFound.
:raises: PowerStateFailure if it failed to set power state to POWER_ON.
:raises: SSHCommandFailed on an error from ssh.
:raises: SSHConnectFailed if ssh failed to connect to the node.
"""
driver_info = _parse_driver_info(task.node)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(task.node)
current_pstate = _get_power_status(ssh_obj, driver_info)
if current_pstate == states.POWER_ON:
_power_off(ssh_obj, driver_info)
state = _power_on(ssh_obj, driver_info)
if state != states.POWER_ON:
raise exception.PowerStateFailure(pstate=states.POWER_ON)
class SSHManagement(base.ManagementInterface):
def get_properties(self):
return COMMON_PROPERTIES
def validate(self, task):
"""Check that 'driver_info' contains SSH credentials.
Validates whether the 'driver_info' property of the supplied
task's node contains the required credentials information.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if any connection parameters are
incorrect.
:raises: MissingParameterValue if a required parameter is missing
"""
_parse_driver_info(task.node)
def get_supported_boot_devices(self):
"""Get a list of the supported boot devices.
:returns: A list with the supported boot devices defined
in :mod:`ironic.common.boot_devices`.
"""
return list(_BOOT_DEVICES_MAP.keys())
@task_manager.require_exclusive_lock
def set_boot_device(self, task, device, persistent=False):
"""Set the boot device for the task's node.
Set the boot device to use on next reboot of the node.
:param task: a task from TaskManager.
:param device: the boot device, one of
:mod:`ironic.common.boot_devices`.
:param persistent: Boolean value. True if the boot device will
persist to all future boots, False if not.
Default: False. Ignored by this driver.
:raises: InvalidParameterValue if an invalid boot device is
specified or if any connection parameters are incorrect.
:raises: MissingParameterValue if a required parameter is missing
:raises: SSHConnectFailed if ssh failed to connect to the node.
:raises: SSHCommandFailed on an error from ssh.
:raises: NotImplementedError if the virt_type does not support
setting the boot device.
"""
node = task.node
driver_info = _parse_driver_info(node)
if device not in self.get_supported_boot_devices():
raise exception.InvalidParameterValue(_(
"Invalid boot device %s specified.") % device)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(node)
boot_device_map = _get_boot_device_map(driver_info['virt_type'])
try:
_set_boot_device(ssh_obj, driver_info, boot_device_map[device])
except NotImplementedError:
LOG.error(_LE("Failed to set boot device for node %(node)s, "
"virt_type %(vtype)s does not support this "
"operation") % {'node': node.uuid,
'vtype': driver_info['virt_type']})
raise
def get_boot_device(self, task):
"""Get the current boot device for the task's node.
Provides the current boot device of the node. Be aware that not
all drivers support this.
:param task: a task from TaskManager.
:raises: InvalidParameterValue if any connection parameters are
incorrect.
:raises: MissingParameterValue if a required parameter is missing
:raises: SSHConnectFailed if ssh failed to connect to the node.
:raises: SSHCommandFailed on an error from ssh.
:returns: a dictionary containing:
:boot_device: the boot device, one of
:mod:`ironic.common.boot_devices` or None if it is unknown.
:persistent: Whether the boot device will persist to all
future boots or not, None if it is unknown.
"""
node = task.node
driver_info = _parse_driver_info(node)
driver_info['macs'] = driver_utils.get_node_mac_addresses(task)
ssh_obj = _get_connection(node)
response = {'boot_device': None, 'persistent': None}
try:
response['boot_device'] = _get_boot_device(ssh_obj, driver_info)
except NotImplementedError:
LOG.warning(_LW("Failed to get boot device for node %(node)s, "
"virt_type %(vtype)s does not support this "
"operation"),
{'node': node.uuid, 'vtype': driver_info['virt_type']})
return response
def get_sensors_data(self, task):
"""Get sensors data.
Not implemented by this driver.
:param task: a TaskManager instance.
"""
raise NotImplementedError()
|
|
#!/usr/bin/python
#
# Copyright (c) 2013 Juniper Networks, Inc. All rights reserved.
#
#
# log
#
# Query log messages from analytics
#
import sys
import argparse
import json
import datetime
import logging
import logging.handlers
import time
import re
from opserver_util import OpServerUtils
from sandesh_common.vns.ttypes import Module
from sandesh_common.vns.constants import ModuleNames, NodeTypeNames
import sandesh.viz.constants as VizConstants
from pysandesh.gen_py.sandesh.ttypes import SandeshType, SandeshLevel
from pysandesh.sandesh_logger import SandeshLogger
from pysandesh.util import UTCTimestampUsec
import commands
import ast
OBJECT_TYPE_LIST = [table_info.log_query_name for table_info in \
VizConstants._OBJECT_TABLES.values()]
OBJECT_TABLE_MAP = dict((table_info.log_query_name, table_name) for \
(table_name, table_info) in VizConstants._OBJECT_TABLES.items())
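# Illustrative (the concrete names come from VizConstants._OBJECT_TABLES): if an
# object table's log_query_name is 'virtual-network', then 'virtual-network'
# appears in OBJECT_TYPE_LIST and OBJECT_TABLE_MAP maps it back to the
# underlying table name used for the query.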
output_file_handle = None
class LogQuerier(object):
def __init__(self):
self._args = None
self._slogger = None
# end __init__
def run(self):
try:
if self.parse_args() != 0:
return
if self._args.tail:
start_time = UTCTimestampUsec() - 10*pow(10,6)
while True:
self._start_time = start_time
self._end_time = UTCTimestampUsec()
start_time = self._end_time + 1
time.sleep(3)
result = self.query()
if result == -1:
return
self.display(result)
else:
start_time = self._args.start_time
end_time = self._args.end_time
if not self._args.start_time:
start_time = "now-10m"
if not self._args.end_time:
end_time = "now"
try:
self._start_time, self._end_time = \
OpServerUtils.parse_start_end_time(
start_time = start_time,
end_time = end_time,
last = self._args.last)
except:
return -1
result = self.query()
if result == -1:
return
self.display(result)
except KeyboardInterrupt:
return
# Public functions
def parse_args(self):
"""
Eg. python log.py --analytics-api-ip 127.0.0.1
--analytics-api-port 8181
--source 127.0.0.1
--node-type Control
--module bgp | cfgm | vnswad
--instance-id 0
--message-type UveVirtualMachineConfigTrace
--category xmpp
--level SYS_INFO | SYS_ERROR
--object-type virtual-network | virtual-machine
--object-id name
--object-select-field ObjectLog | SystemLog
--reverse
--verbose
--raw
--trace BgpPeerTraceBuf
[--start-time now-10m --end-time now] | --last 10m
--send-syslog
--syslog-server 127.0.0.1
--syslog-port 514
--keywords comma,separated,list
"""
defaults = {
'analytics_api_ip': '127.0.0.1',
'analytics_api_port': '8181',
}
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.set_defaults(**defaults)
parser.add_argument("--analytics-api-ip", help="IP address of Analytics API Server")
parser.add_argument("--analytics-api-port", help="Port of Analytics API Server")
parser.add_argument(
"--start-time", help="Logs start time (format now-10m, now-1h)")
parser.add_argument("--end-time", help="Logs end time")
parser.add_argument(
"--last", help="Logs from last time period (format 10m, 1d)")
parser.add_argument("--source", help="Logs from source address")
parser.add_argument("--node-type", help="Logs from node type",
choices=NodeTypeNames.values())
parser.add_argument(
"--module", help="Logs from module", choices=ModuleNames.values())
parser.add_argument("--instance-id", help="Logs from module instance")
parser.add_argument("--category", help="Logs of category")
parser.add_argument("--level", help="Logs of level")
parser.add_argument("--message-type", help="Logs of message type")
parser.add_argument("--reverse", action="store_true",
help="Show logs in reverse chronological order")
parser.add_argument(
"--verbose", action="store_true", help="Show internal information")
parser.add_argument(
"--raw", action="store_true", help="Show raw XML messages")
parser.add_argument(
"--object-type", help="Logs of object type", choices=OBJECT_TYPE_LIST)
parser.add_argument("--object-values", action="store_true",
help="Display list of object names")
parser.add_argument("--object-id", help="Logs of object name")
parser.add_argument(
"--object-select-field", help="Select field to filter the log",
choices=[VizConstants.OBJECT_LOG, VizConstants.SYSTEM_LOG])
parser.add_argument("--trace", help="Dump trace buffer")
parser.add_argument("--limit", help="Limit the number of messages")
parser.add_argument("--send-syslog", action="store_true",
help="Send syslog to specified server and port")
parser.add_argument("--syslog-server",
help="IP address of syslog server", default='localhost')
parser.add_argument("--syslog-port", help="Port to send syslog to",
type=int, default=514)
parser.add_argument("--tail","-f", help="Tail logs from now", action="store_true")
parser.add_argument("--keywords", help="comma seperated list of keywords")
parser.add_argument("--message-types", \
help="Display list of message type", action="store_true")
parser.add_argument("--output-file", "-o", help="redirect output to file")
parser.add_argument("--json", help="Dump output as json", action="store_true")
parser.add_argument("--all", action="store_true", help=argparse.SUPPRESS)
parser.add_argument("--admin-user", help="Name of admin user", default="admin")
parser.add_argument("--admin-password", help="Password of admin user",
default="contrail123")
self._args = parser.parse_args()
return 0
# end parse_args
# Public functions
def query(self):
if self._args.tail and (self._args.send_syslog or self._args.reverse or
self._args.start_time or self._args.end_time):
invalid_combination = " --tail"
if self._args.send_syslog:
invalid_combination += ", --send-syslog"
if self._args.reverse:
invalid_combination += ", --reverse"
if self._args.start_time:
invalid_combination += ", --start-time"
if self._args.end_time:
invalid_combination += ", --end-time"
print "Combination of options" + invalid_combination + " are not valid."
return -1
global output_file_handle
if self._args.output_file is not None:
if output_file_handle is None:
#Open the file for writing
try:
if self._args.tail:
output_file_handle = open(self._args.output_file, "a")
else:
output_file_handle = open(self._args.output_file, "w")
except Exception as e:
print e
print "Exception occured when creating/opening file %s" % \
self._args.output_file
return -1
start_time, end_time = self._start_time, self._end_time
if self._args.message_types is True:
command_str = ("contrail-stats --table FieldNames.fields" +
" --where name=MessageTable:Messagetype --select name fields.value" +
" --start-time " + str(start_time) +
" --end-time " + str(end_time) +
" --analytics-api-ip " + str(self._args.analytics_api_ip) +
" --analytics-api-port " + str(self._args.analytics_api_port))
res = commands.getoutput(command_str)
res = res.splitlines()
res = res[1:]
for r in res:
print ast.literal_eval(r)['fields.value']
return None
messages_url = OpServerUtils.opserver_query_url(
self._args.analytics_api_ip,
self._args.analytics_api_port)
where_msg = []
where_obj = []
and_filter = []
or_filter = []
if self._args.source is not None:
if self._args.source.endswith('*'):
val = self._args.source[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.source
oper = OpServerUtils.MatchOp.EQUAL
source_match = OpServerUtils.Match(name=VizConstants.SOURCE,
value=val, op=oper)
where_msg.append(source_match.__dict__)
if self._args.module is not None:
module_match = OpServerUtils.Match(name=VizConstants.MODULE,
value=self._args.module,
op=OpServerUtils.MatchOp.EQUAL)
where_msg.append(module_match.__dict__)
if self._args.category is not None:
if self._args.category.endswith('*'):
val = self._args.category[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.category
oper = OpServerUtils.MatchOp.EQUAL
category_match = OpServerUtils.Match(
name=VizConstants.CATEGORY,
value=val, op=oper)
where_msg.append(category_match.__dict__)
if self._args.message_type is not None:
if self._args.message_type.endswith('*'):
val = self._args.message_type[:-1]
oper = OpServerUtils.MatchOp.PREFIX
else:
val = self._args.message_type
oper = OpServerUtils.MatchOp.EQUAL
message_type_match = OpServerUtils.Match(
name=VizConstants.MESSAGE_TYPE,
value=val, op=oper)
where_msg.append(message_type_match.__dict__)
if self._args.level is not None:
level_match = OpServerUtils.Match(
name=VizConstants.LEVEL,
value=SandeshLevel._NAMES_TO_VALUES[self._args.level],
op=OpServerUtils.MatchOp.LEQ)
and_filter.append(level_match.__dict__)
if self._args.node_type is not None:
node_type_match = OpServerUtils.Match(
name=VizConstants.NODE_TYPE,
value=self._args.node_type,
op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(node_type_match.__dict__)
if self._args.instance_id is not None:
instance_id_match = OpServerUtils.Match(
name=VizConstants.INSTANCE_ID,
value=self._args.instance_id,
op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(instance_id_match.__dict__)
# Object logs :
# --object-type <> : All logs for the particular object type
# --object-type <> --object-values : Object-id values for the particular
# object type
# --object-type <> --object-id <> : All logs matching object-id for
# particular object type
if (self._args.object_type is not None or
self._args.object_id is not None or
self._args.object_select_field is not None or
self._args.object_values is True):
# Validate object-type
if self._args.object_type is not None:
if self._args.object_type in OBJECT_TYPE_LIST:
if self._args.object_type in OBJECT_TABLE_MAP:
table = OBJECT_TABLE_MAP[self._args.object_type]
else:
print 'Table not found for object-type [%s]' % \
(self._args.object_type)
return -1
else:
print 'Unknown object-type [%s]' % (self._args.object_type)
return -1
else:
print 'Object-type required for query'
return -1
# Validate object-id and object-values
if self._args.object_id is not None and \
self._args.object_values is False:
object_id = self._args.object_id
if object_id.endswith("*"):
id_match = OpServerUtils.Match(
name=OpServerUtils.OBJECT_ID,
value=object_id[:-1],
op=OpServerUtils.MatchOp.PREFIX)
else:
id_match = OpServerUtils.Match(
name=OpServerUtils.OBJECT_ID,
value=object_id,
op=OpServerUtils.MatchOp.EQUAL)
where_obj.append(id_match.__dict__)
elif self._args.object_id is not None and \
self._args.object_values is True:
print 'Please specify either object-id or object-values but not both'
return -1
if self._args.object_values is False:
if self._args.object_select_field is not None:
obj_sel_field = self._args.object_select_field
if not isinstance(self._args.object_select_field, list):
obj_sel_field = [self._args.object_select_field]
if VizConstants.OBJECT_LOG in obj_sel_field or \
VizConstants.SYSTEM_LOG in obj_sel_field:
self._args.object_select_field = obj_sel_field
else:
print 'Invalid object-select-field. '\
'Valid values are "%s" or "%s"' \
% (VizConstants.OBJECT_LOG,
VizConstants.SYSTEM_LOG)
return -1
else:
self._args.object_select_field = obj_sel_field = [
VizConstants.OBJECT_LOG, VizConstants.SYSTEM_LOG]
select_list = [
VizConstants.TIMESTAMP,
VizConstants.SOURCE,
VizConstants.MODULE,
VizConstants.MESSAGE_TYPE,
] + obj_sel_field
else:
if self._args.object_select_field:
print 'Please specify either object-id with ' + \
'object-select-field or only object-values'
return -1
if len(where_msg):
options = [where['name'] for where in where_msg]
print 'Invalid/unsupported where-clause options %s for object-values query' % str(options)
return -1
select_list = [
OpServerUtils.OBJECT_ID
]
if len(where_obj) or len(where_msg):
where = [where_obj + where_msg]
else:
where = None
elif self._args.trace is not None:
table = VizConstants.COLLECTOR_GLOBAL_TABLE
if self._args.source is None:
print 'Source is required for trace buffer dump'
return -1
if self._args.module is None:
print 'Module is required for trace buffer dump'
return -1
trace_buf_match = OpServerUtils.Match(
name=VizConstants.CATEGORY,
value=self._args.trace,
op=OpServerUtils.MatchOp.EQUAL)
where_msg.append(trace_buf_match.__dict__)
where = [where_msg]
select_list = [
VizConstants.TIMESTAMP,
VizConstants.MESSAGE_TYPE,
VizConstants.SEQUENCE_NUM,
VizConstants.DATA,
VizConstants.SANDESH_TYPE
]
sandesh_type_filter = OpServerUtils.Match(
name=VizConstants.SANDESH_TYPE,
value=str(
SandeshType.TRACE),
op=OpServerUtils.MatchOp.EQUAL)
and_filter.append(sandesh_type_filter.__dict__)
else:
# Message Table Query
table = VizConstants.COLLECTOR_GLOBAL_TABLE
if len(where_msg):
where = [where_msg]
else:
where = None
select_list = [
VizConstants.TIMESTAMP,
VizConstants.SOURCE,
VizConstants.MODULE,
VizConstants.CATEGORY,
VizConstants.MESSAGE_TYPE,
VizConstants.SEQUENCE_NUM,
VizConstants.DATA,
VizConstants.SANDESH_TYPE,
VizConstants.LEVEL,
VizConstants.NODE_TYPE,
VizConstants.INSTANCE_ID,
]
filter = None
if len(or_filter):
filter = [and_filter+[filt] for filt in or_filter]
elif len(and_filter):
filter = [and_filter]
if self._args.keywords is not None:
p = re.compile('\s*,\s*|\s+')
if where is None:
where = [[]]
for kwd in p.split(self._args.keywords):
message_type_match = OpServerUtils.Match(
name=VizConstants.KEYWORD,
value=kwd,
op=OpServerUtils.MatchOp.EQUAL)
for wc in where:
wc.append(message_type_match.__dict__)
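# Illustrative: the pattern above splits on commas (with optional surrounding
# whitespace) or on plain whitespace, so p.split('foo, bar baz') yields
# ['foo', 'bar', 'baz']; each keyword then becomes an EQUAL match on KEYWORD
# appended to every where-clause.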
# Add sort by timestamp for non object value queries
sort_op = None
sort_fields = None
if self._args.object_values is False:
if self._args.reverse:
sort_op = OpServerUtils.SortOp.DESCENDING
else:
sort_op = OpServerUtils.SortOp.ASCENDING
sort_fields = [VizConstants.TIMESTAMP]
if self._args.limit:
limit = int(self._args.limit)
else:
limit = None
messages_query = OpServerUtils.Query(table,
start_time=start_time,
end_time=end_time,
select_fields=select_list,
where=where,
filter=filter,
sort=sort_op,
sort_fields=sort_fields,
limit=limit)
if self._args.verbose:
print 'Performing query: {0}'.format(
json.dumps(messages_query.__dict__))
resp = OpServerUtils.post_url_http(
messages_url, json.dumps(messages_query.__dict__),
self._args.admin_user, self._args.admin_password)
result = {}
if resp is not None:
resp = json.loads(resp)
qid = resp['href'].rsplit('/', 1)[1]
result = OpServerUtils.get_query_result(
self._args.analytics_api_ip, self._args.analytics_api_port, qid,
self._args.admin_user, self._args.admin_password)
return result
# end query
def output(self, log_str, sandesh_level):
if self._args.json:
if isinstance(log_str,dict):
#convert to json and dump
log_str=json.dumps(log_str)
if self._args.output_file is not None:
#Append to a file specified
try:
output_file_handle.write(log_str)
output_file_handle.write("\n")
return
except Exception as e:
print e
print "Exception occured when writing file %s" % \
self._args.output_file
return -1
if self._args.send_syslog:
syslog_level = SandeshLogger._SANDESH_LEVEL_TO_LOGGER_LEVEL[
sandesh_level]
self._logger.log(syslog_level, log_str)
else:
print log_str
#end output
def display(self, result):
if result == [] or result is None:
return
messages_dict_list = result
# Setup logger and syslog handler
if self._args.send_syslog:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
syslog_handler = logging.handlers.SysLogHandler(
address = (self._args.syslog_server, self._args.syslog_port))
contrail_formatter = logging.Formatter('contrail: %(message)s')
syslog_handler.setFormatter(contrail_formatter)
logger.addHandler(syslog_handler)
self._logger = logger
# For json we will be outputting list of dicts so open the list here
if self._args.json:
first = True
self.output('[', SandeshLevel.INVALID)
for messages_dict in messages_dict_list:
if VizConstants.TIMESTAMP in messages_dict:
message_dt = datetime.datetime.fromtimestamp(
int(messages_dict[VizConstants.TIMESTAMP]) /
OpServerUtils.USECS_IN_SEC)
message_dt += datetime.timedelta(
microseconds=
(int(messages_dict[VizConstants.TIMESTAMP]) %
OpServerUtils.USECS_IN_SEC))
message_ts = message_dt.strftime(OpServerUtils.TIME_FORMAT_STR)
else:
message_ts = 'Time: NA'
messages_dict[VizConstants.TIMESTAMP] = message_ts
if VizConstants.SOURCE in messages_dict:
source = messages_dict[VizConstants.SOURCE]
else:
source = 'Source: NA'
if VizConstants.NODE_TYPE in messages_dict:
node_type = messages_dict[VizConstants.NODE_TYPE]
else:
node_type = ''
if VizConstants.MODULE in messages_dict:
module = messages_dict[VizConstants.MODULE]
else:
module = 'Module: NA'
if VizConstants.INSTANCE_ID in messages_dict:
instance_id = messages_dict[VizConstants.INSTANCE_ID]
else:
instance_id = ''
if VizConstants.MESSAGE_TYPE in messages_dict:
message_type = messages_dict[VizConstants.MESSAGE_TYPE]
else:
message_type = 'Message Type: NA'
if VizConstants.SANDESH_TYPE in messages_dict:
sandesh_type = messages_dict[VizConstants.SANDESH_TYPE]
else:
sandesh_type = SandeshType.INVALID
# By default SYS_DEBUG
sandesh_level = SandeshLevel.SYS_DEBUG
if self._args.object_type is None:
if VizConstants.CATEGORY in messages_dict:
category = messages_dict[VizConstants.CATEGORY]
else:
category = 'Category: NA'
if VizConstants.LEVEL in messages_dict:
sandesh_level = messages_dict[VizConstants.LEVEL]
level = SandeshLevel._VALUES_TO_NAMES[sandesh_level]
else:
level = 'Level: NA'
messages_dict[VizConstants.LEVEL] = level
if VizConstants.SEQUENCE_NUM in messages_dict:
seq_num = messages_dict[VizConstants.SEQUENCE_NUM]
else:
seq_num = 'Sequence Number: NA'
if VizConstants.DATA in messages_dict:
# Convert XML data to dict
if self._args.raw:
data_str = messages_dict[VizConstants.DATA]
else:
OpServerUtils.messages_xml_data_to_dict(
messages_dict, VizConstants.DATA)
if isinstance(messages_dict[VizConstants.DATA], dict):
data_dict = messages_dict[VizConstants.DATA]
data_str = OpServerUtils.messages_data_dict_to_str(
data_dict, message_type, sandesh_type)
else:
data_str = messages_dict[VizConstants.DATA]
else:
data_str = 'Data not present'
if self._args.json:
if not first:
self.output(", ", sandesh_level)
else:
first = False
OpServerUtils.messages_dict_scrub(messages_dict)
self.output(messages_dict, sandesh_level)
else:
if self._args.trace is not None:
trace_str = '{0} {1}:{2} {3}'.format(
message_ts, message_type, seq_num, data_str)
self.output(trace_str, sandesh_level)
else:
log_str = \
'{0} {1} [{2}:{3}:{4}:{5}][{6}] : {7}:{8} {9}'.format(
message_ts, source, node_type, module, instance_id,
category, level, message_type, seq_num, data_str)
self.output(log_str, sandesh_level)
else:
if self._args.object_values is True:
if OpServerUtils.OBJECT_ID in messages_dict:
obj_str = messages_dict[OpServerUtils.OBJECT_ID]
print obj_str
continue
for obj_sel_field in self._args.object_select_field:
if obj_sel_field in messages_dict:
if self._args.raw:
data_str = messages_dict[obj_sel_field]
else:
# Convert XML data to dict
OpServerUtils.messages_xml_data_to_dict(
messages_dict, obj_sel_field)
if isinstance(messages_dict[obj_sel_field], dict):
data_dict = messages_dict[obj_sel_field]
data_str =\
OpServerUtils.messages_data_dict_to_str(
data_dict, message_type,
sandesh_type)
else:
data_str = messages_dict[obj_sel_field]
if data_str:
obj_str = '{0} {1} [{2}:{3}:{4}] : {5}: {6}'.format(
message_ts, source, node_type, module,
instance_id, message_type, data_str)
if self._args.json:
if not first:
self.output(", ", sandesh_level)
else:
first = False
OpServerUtils.messages_dict_scrub(messages_dict)
self.output(messages_dict, sandesh_level)
else:
self.output(obj_str, sandesh_level)
# For json we will be outputting list of dicts so close the list here
if self._args.json:
self.output(']', SandeshLevel.INVALID)
# end display
# end class LogQuerier
def main():
querier = LogQuerier()
querier.run()
# end main
if __name__ == "__main__":
main()
|
|
# Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import grpc
import yaml
from hapi.chart.config_pb2 import Config
from hapi.services.tiller_pb2 import GetReleaseContentRequest
from hapi.services.tiller_pb2 import GetReleaseStatusRequest
from hapi.services.tiller_pb2 import GetVersionRequest
from hapi.services.tiller_pb2 import InstallReleaseRequest
from hapi.services.tiller_pb2 import ListReleasesRequest
from hapi.services.tiller_pb2_grpc import ReleaseServiceStub
from hapi.services.tiller_pb2 import TestReleaseRequest
from hapi.services.tiller_pb2 import UninstallReleaseRequest
from hapi.services.tiller_pb2 import UpdateReleaseRequest
from oslo_config import cfg
from oslo_log import log as logging
from armada.const import STATUS_DEPLOYED, STATUS_FAILED
from armada.exceptions import tiller_exceptions as ex
from armada.handlers.k8s import K8s
from armada.utils.release import release_prefix
from armada.utils.release import label_selectors
TILLER_VERSION = b'2.7.2'
TILLER_TIMEOUT = 300
GRPC_EPSILON = 60
RELEASE_LIMIT = 128 # TODO(mark-burnett): There may be a better page size.
RUNTEST_SUCCESS = 9
# the standard gRPC max message size is 4MB
# this expansion comes at a performance penalty
# but until proper paging is supported, we need
# to support a larger payload as the current
# limit is exhausted with just 10 releases
MAX_MESSAGE_LENGTH = 429496729
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class TillerResult(object):
'''Object to hold Tiller results for Armada.'''
def __init__(self, release, namespace, status, description, version):
self.release = release
self.namespace = namespace
self.status = status
self.description = description
self.version = version
class Tiller(object):
'''
The Tiller class supports communication and requests to the Tiller Helm
service over gRPC
'''
def __init__(self, tiller_host=None, tiller_port=None,
tiller_namespace=None):
self.tiller_host = tiller_host
self.tiller_port = tiller_port or CONF.tiller_port
self.tiller_namespace = tiller_namespace or CONF.tiller_namespace
# init k8s connectivity
self.k8s = K8s()
# init Tiller channel
self.channel = self.get_channel()
# init timeout for all requests
# and assume eventually this will
# be fed at runtime as an override
self.timeout = TILLER_TIMEOUT
LOG.debug('Armada is using Tiller at: %s:%s, namespace=%s, timeout=%s',
self.tiller_host, self.tiller_port, self.tiller_namespace,
self.timeout)
@property
def metadata(self):
'''
Return Tiller metadata for requests
'''
return [(b'x-helm-api-client', TILLER_VERSION)]
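# Illustrative: with TILLER_VERSION = b'2.7.2' this evaluates to
# [(b'x-helm-api-client', b'2.7.2')], which is passed as gRPC metadata on
# every stub call made below.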
def get_channel(self):
'''
Return a Tiller channel
'''
tiller_ip = self._get_tiller_ip()
tiller_port = self._get_tiller_port()
try:
LOG.debug('Tiller getting gRPC insecure channel at %s:%s '
'with options: [grpc.max_send_message_length=%s, '
'grpc.max_receive_message_length=%s]',
tiller_ip, tiller_port,
MAX_MESSAGE_LENGTH, MAX_MESSAGE_LENGTH)
return grpc.insecure_channel(
'%s:%s' % (tiller_ip, tiller_port),
options=[
('grpc.max_send_message_length', MAX_MESSAGE_LENGTH),
('grpc.max_receive_message_length', MAX_MESSAGE_LENGTH)
]
)
except Exception:
raise ex.ChannelException()
def _get_tiller_pod(self):
'''
Returns Tiller pod using the Tiller pod labels specified in the Armada
config
'''
pods = None
namespace = self._get_tiller_namespace()
pods = self.k8s.get_namespace_pod(namespace,
CONF.tiller_pod_labels).items
# No Tiller pods found
if not pods:
raise ex.TillerPodNotFoundException(CONF.tiller_pod_labels)
# Return first Tiller pod in running state
for pod in pods:
if pod.status.phase == 'Running':
LOG.debug('Found at least one Running Tiller pod.')
return pod
# No Tiller pod found in running state
raise ex.TillerPodNotRunningException()
def _get_tiller_ip(self):
'''
Returns the Tiller pod's IP address by searching all namespaces
'''
if self.tiller_host:
LOG.debug('Using Tiller host IP: %s', self.tiller_host)
return self.tiller_host
else:
pod = self._get_tiller_pod()
LOG.debug('Using Tiller pod IP: %s', pod.status.pod_ip)
return pod.status.pod_ip
def _get_tiller_port(self):
'''Stub method to support arbitrary ports in the future'''
LOG.debug('Using Tiller host port: %s', self.tiller_port)
return self.tiller_port
def _get_tiller_namespace(self):
LOG.debug('Using Tiller namespace: %s', self.tiller_namespace)
return self.tiller_namespace
def tiller_status(self):
'''
Return whether Tiller exists or not
'''
if self._get_tiller_ip():
LOG.debug('Getting Tiller Status: Tiller exists')
return True
LOG.debug('Getting Tiller Status: Tiller does not exist')
return False
def list_releases(self):
'''
List Helm Releases
'''
# TODO(MarshM) possibly combine list_releases() with list_charts()
# since they do the same thing, grouping output differently
releases = []
stub = ReleaseServiceStub(self.channel)
# TODO(mark-burnett): Since we're limiting page size, we need to
# iterate through all the pages when collecting this list.
# NOTE(MarshM): `Helm List` defaults to returning Deployed and Failed,
# but this might not be a desirable ListReleasesRequest default.
req = ListReleasesRequest(limit=RELEASE_LIMIT,
status_codes=[STATUS_DEPLOYED,
STATUS_FAILED],
sort_by='LAST_RELEASED',
sort_order='DESC')
LOG.debug('Tiller ListReleases() with timeout=%s', self.timeout)
release_list = stub.ListReleases(req, self.timeout,
metadata=self.metadata)
for y in release_list:
# TODO(MarshM) this log is too noisy, fix later
# LOG.debug('Found release: %s', y.releases)
releases.extend(y.releases)
return releases
def get_chart_templates(self, template_name, name, release_name, namespace,
chart, disable_hooks, values):
# Performs a dry-run install and returns the rendered template whose
# metadata name matches template_name.
LOG.info("Template( %s ) : %s ", template_name, name)
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=True,
values=values,
name=name,
namespace=namespace,
wait=False)
templates = stub.InstallRelease(
release_request, self.timeout, metadata=self.metadata)
for template in yaml.load_all(
getattr(templates.release, 'manifest', [])):
if template_name == template.get('metadata', None).get(
'name', None):
LOG.info(template_name)
return template
def _pre_update_actions(self, actions, release_name, namespace, chart,
disable_hooks, values, timeout):
'''
:params actions - dict of 'update', 'delete' and 'create' action lists
:params namespace - namespace in which to apply the actions
'''
try:
for action in actions.get('update', []):
name = action.get('name')
LOG.info('Updating %s ', name)
action_type = action.get('type')
labels = action.get('labels')
self.rolling_upgrade_pod_deployment(
name, release_name, namespace, labels,
action_type, chart, disable_hooks, values, timeout)
except Exception:
LOG.warn("Pre: Could not update anything, please check yaml")
raise ex.PreUpdateJobDeleteException(name, namespace)
try:
for action in actions.get('delete', []):
name = action.get('name')
action_type = action.get('type')
labels = action.get('labels', None)
self.delete_resources(release_name, name, action_type,
labels, namespace, timeout)
except Exception:
LOG.warn("PRE: Could not delete anything, please check yaml")
raise ex.PreUpdateJobDeleteException(name, namespace)
try:
for action in actions.get('create', []):
name = action.get("name")
action_type = action.get("type")
if "job" in action_type:
LOG.info("Creating %s in namespace: %s", name, namespace)
# TODO(MarshM) create_job_action does nothing but LOG.debug
self.k8s.create_job_action(name, action_type)
continue
except Exception:
LOG.warn("PRE: Could not create anything, please check yaml")
raise ex.PreUpdateJobCreateException(name, namespace)
def _post_update_actions(self, actions, namespace):
try:
for action in actions.get('create', []):
name = action.get("name")
action_type = action.get("type")
if "job" in action_type:
LOG.info("Creating %s in namespace: %s", name, namespace)
# TODO(MarshM) create_job_action does nothing but LOG.debug
self.k8s.create_job_action(name, action_type)
continue
except Exception:
LOG.warn("POST: Could not create anything, please check yaml")
raise ex.PreUpdateJobCreateException(name, namespace)
def list_charts(self):
'''
List Helm Charts from Latest Releases
Returns a list of tuples in the form:
(name, version, chart, values, status)
'''
LOG.debug('Getting known releases from Tiller...')
charts = []
for latest_release in self.list_releases():
try:
release = (
latest_release.name, latest_release.version,
latest_release.chart, latest_release.config.raw,
latest_release.info.status.Code.Name(
latest_release.info.status.code))
charts.append(release)
LOG.debug('Found release %s, version %s, status: %s',
release[0], release[1], release[4])
except (AttributeError, IndexError) as e:
LOG.debug('%s while getting releases: %s, ex=%s',
e.__class__.__name__, latest_release, e)
continue
return charts
def update_release(self, chart, release, namespace,
dry_run=False,
pre_actions=None,
post_actions=None,
disable_hooks=False,
values=None,
wait=False,
timeout=None):
'''
Update a Helm Release
'''
rel_timeout = self.timeout if not timeout else timeout
LOG.debug('Helm update release%s: wait=%s, timeout=%s',
(' (dry run)' if dry_run else ''),
wait, timeout)
if values is None:
values = Config(raw='')
else:
values = Config(raw=values)
self._pre_update_actions(pre_actions, release, namespace, chart,
disable_hooks, values, timeout)
# build release install request
try:
stub = ReleaseServiceStub(self.channel)
release_request = UpdateReleaseRequest(
chart=chart,
dry_run=dry_run,
disable_hooks=disable_hooks,
values=values,
name=release,
wait=wait,
timeout=timeout)
update_msg = stub.UpdateRelease(
release_request, rel_timeout + GRPC_EPSILON,
metadata=self.metadata)
tiller_result = TillerResult(
update_msg.release.name,
update_msg.release.namespace,
update_msg.release.info.status.Code.Name(
update_msg.release.info.status.code),
update_msg.release.info.Description,
update_msg.release.version)
# Apply any post-update actions once the upgrade has gone through.
self._post_update_actions(post_actions, namespace)
return tiller_result
except Exception:
LOG.exception('Error while updating release %s', release)
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Upgrade')
def install_release(self, chart, release, namespace,
dry_run=False,
values=None,
wait=False,
timeout=None):
'''
Create a Helm Release
'''
rel_timeout = self.timeout if not timeout else timeout
LOG.debug('Helm install release%s: wait=%s, timeout=%s',
(' (dry run)' if dry_run else ''),
wait, timeout)
if values is None:
values = Config(raw='')
else:
values = Config(raw=values)
# build release install request
try:
stub = ReleaseServiceStub(self.channel)
release_request = InstallReleaseRequest(
chart=chart,
dry_run=dry_run,
values=values,
name=release,
namespace=namespace,
wait=wait,
timeout=timeout)
install_msg = stub.InstallRelease(
release_request, rel_timeout + GRPC_EPSILON,
metadata=self.metadata)
tiller_result = TillerResult(
install_msg.release.name,
install_msg.release.namespace,
install_msg.release.info.status.Code.Name(
install_msg.release.info.status.code),
install_msg.release.info.Description,
install_msg.release.version)
return tiller_result
except Exception:
LOG.exception('Error while installing release %s', release)
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Install')
def testing_release(self, release, timeout=300, cleanup=True):
'''
:param release - name of release to test
:param timeout - runtime before exiting
:param cleanup - removes testing pod created
:returns - results of test pod
'''
LOG.debug("Helm test release %s, timeout=%s", release, timeout)
try:
stub = ReleaseServiceStub(self.channel)
release_request = TestReleaseRequest(
name=release, timeout=timeout, cleanup=cleanup)
content = self.get_release_content(release)
if not len(content.release.hooks):
LOG.info('No test found')
return False
if content.release.hooks[0].events[0] == RUNTEST_SUCCESS:
test = stub.RunReleaseTest(
release_request, self.timeout, metadata=self.metadata)
if test.running():
self.k8s.wait_get_completed_podphase(release)
test.cancel()
return self.get_release_status(release)
except Exception:
LOG.exception('Error while testing release %s', release)
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Test')
def get_release_status(self, release, version=0):
'''
:param release - name of release to test
:param version - version of release status
'''
LOG.debug('Helm getting release status for release=%s, version=%s',
release, version)
try:
stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseStatusRequest(
name=release, version=version)
release_status = stub.GetReleaseStatus(
status_request, self.timeout, metadata=self.metadata)
LOG.debug('GetReleaseStatus= %s', release_status)
return release_status
except Exception:
raise ex.GetReleaseStatusException(release, version)
def get_release_content(self, release, version=0):
'''
:param release - name of release to test
:param version - version of release status
'''
LOG.debug('Helm getting release content for release=%s, version=%s',
release, version)
try:
stub = ReleaseServiceStub(self.channel)
status_request = GetReleaseContentRequest(
name=release, version=version)
release_content = stub.GetReleaseContent(
status_request, self.timeout, metadata=self.metadata)
LOG.debug('GetReleaseContent= %s', release_content)
return release_content
except Exception:
raise ex.GetReleaseContentException(release, version)
def tiller_version(self):
'''
:returns - Tiller version
'''
try:
stub = ReleaseServiceStub(self.channel)
release_request = GetVersionRequest()
LOG.debug('Getting Tiller version, with timeout=%s', self.timeout)
tiller_version = stub.GetVersion(
release_request, self.timeout, metadata=self.metadata)
tiller_version = getattr(tiller_version.Version, 'sem_ver', None)
LOG.debug('Got Tiller version %s', tiller_version)
return tiller_version
except Exception:
LOG.debug('Failed to get Tiller version')
raise ex.TillerVersionException()
def uninstall_release(self, release, disable_hooks=False, purge=True):
'''
        :params - release - Helm chart release name
        :params - disable_hooks - whether to skip pre/post delete hooks
        :params - purge - deep delete of chart
        Deletes a Helm chart from Tiller
'''
        # build release uninstall request
try:
stub = ReleaseServiceStub(self.channel)
LOG.info("Uninstall %s release with disable_hooks=%s, "
"purge=%s flags", release, disable_hooks, purge)
release_request = UninstallReleaseRequest(
name=release, disable_hooks=disable_hooks, purge=purge)
return stub.UninstallRelease(
release_request, self.timeout, metadata=self.metadata)
except Exception:
LOG.exception('Error while uninstalling release %s', release)
status = self.get_release_status(release)
raise ex.ReleaseException(release, status, 'Delete')
def chart_cleanup(self, prefix, charts):
'''
:params charts - list of yaml charts
        :params prefix - release prefix used to build and filter release names
:result - will remove any chart that is not present in yaml
'''
valid_charts = []
for gchart in charts:
for chart in gchart.get('chart_group'):
valid_charts.append(release_prefix(
prefix, chart.get('chart').get('name')))
actual_charts = [x.name for x in self.list_releases()]
chart_diff = list(set(actual_charts) - set(valid_charts))
for chart in chart_diff:
if chart.startswith(prefix):
LOG.debug("Release: %s will be removed", chart)
self.uninstall_release(chart)
def delete_resources(self, release_name, resource_name, resource_type,
resource_labels, namespace, wait=False,
timeout=TILLER_TIMEOUT):
'''
:params release_name - release name the specified resource is under
:params resource_name - name of specific resource
:params resource_type - type of resource e.g. job, pod, etc.
:params resource_labels - labels by which to identify the resource
        :params namespace - namespace of the resource
        :params wait - wait for deleted pods to be redeployed (pod type only)
        :params timeout - timeout passed to job deletion
        Apply deletion logic based on the type of resource
'''
label_selector = ''
if resource_labels is not None:
label_selector = label_selectors(resource_labels)
LOG.debug("Deleting resources in namespace %s matching "
"selectors %s.", namespace, label_selector)
handled = False
if resource_type == 'job':
get_jobs = self.k8s.get_namespace_job(namespace, label_selector)
for jb in get_jobs.items:
jb_name = jb.metadata.name
LOG.info("Deleting job %s in namespace: %s",
jb_name, namespace)
self.k8s.delete_job_action(jb_name, namespace, timeout=timeout)
handled = True
if resource_type == 'cronjob' or resource_type == 'job':
get_jobs = self.k8s.get_namespace_cron_job(
namespace, label_selector)
for jb in get_jobs.items:
jb_name = jb.metadata.name
LOG.info("Deleting cron job %s in namespace: %s",
jb_name, namespace)
if resource_type == 'job':
# TODO: Eventually disallow this, allowing initially since
# some existing clients were expecting this behavior.
LOG.warning("Deleting cron jobs via `type: job` is "
"deprecated, use `type: cronjob` instead")
self.k8s.delete_cron_job_action(jb_name, namespace)
handled = True
if resource_type == 'pod':
release_pods = self.k8s.get_namespace_pod(
namespace, label_selector)
for pod in release_pods.items:
pod_name = pod.metadata.name
LOG.info("Deleting pod %s in namespace: %s",
pod_name, namespace)
self.k8s.delete_namespace_pod(pod_name, namespace)
if wait:
self.k8s.wait_for_pod_redeployment(pod_name, namespace)
handled = True
if not handled:
LOG.error("Unable to execute name: %s type: %s ",
resource_name, resource_type)
def rolling_upgrade_pod_deployment(self, name, release_name, namespace,
resource_labels, action_type, chart,
disable_hooks, values,
timeout=TILLER_TIMEOUT):
'''
        update statefulsets (daemonset, statefulset)
'''
if action_type == 'daemonset':
LOG.info('Updating: %s', action_type)
label_selector = ''
if resource_labels is not None:
label_selector = label_selectors(resource_labels)
get_daemonset = self.k8s.get_namespace_daemonset(
namespace=namespace, label=label_selector)
for ds in get_daemonset.items:
ds_name = ds.metadata.name
ds_labels = ds.metadata.labels
if ds_name == name:
LOG.info("Deleting %s : %s in %s", action_type, ds_name,
namespace)
self.k8s.delete_daemon_action(ds_name, namespace)
# update the daemonset yaml
template = self.get_chart_templates(
ds_name, name, release_name, namespace, chart,
disable_hooks, values)
template['metadata']['labels'] = ds_labels
template['spec']['template']['metadata'][
'labels'] = ds_labels
self.k8s.create_daemon_action(
namespace=namespace, template=template)
# delete pods
self.delete_resources(
release_name, name, 'pod', resource_labels, namespace,
wait=True, timeout=timeout)
else:
LOG.error("Unable to exectue name: % type: %s", name, action_type)
|
|
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from BattleBase import *
from direct.actor import Actor
from toontown.suit import SuitDNA
from direct.directnotify import DirectNotifyGlobal
import DistributedBattleBase
from toontown.toon import TTEmote
from otp.avatar import Emote
from toontown.toonbase import TTLocalizer
import MovieUtil
from direct.fsm import State
from toontown.suit import Suit
import SuitBattleGlobals
import random
from toontown.toonbase import ToontownGlobals
class DistributedBattleBldg(DistributedBattleBase.DistributedBattleBase):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleBldg')
camFOFov = 30.0
camFOPos = Point3(0, -10, 4)
def __init__(self, cr):
townBattle = cr.playGame.getPlace().townBattle
DistributedBattleBase.DistributedBattleBase.__init__(self, cr, townBattle)
self.streetBattle = 0
self.fsm.addState(State.State('BuildingReward', self.enterBuildingReward, self.exitBuildingReward, ['Resume']))
offState = self.fsm.getStateNamed('Off')
offState.addTransition('BuildingReward')
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('BuildingReward')
def generate(self):
DistributedBattleBase.DistributedBattleBase.generate(self)
def setBossBattle(self, value):
self.bossBattle = value
if self.bossBattle:
self.battleMusic = base.loadMusic('phase_7/audio/bgm/encntr_suit_winning_indoor.mid')
else:
self.battleMusic = base.loadMusic('phase_7/audio/bgm/encntr_general_bg_indoor.mid')
base.playMusic(self.battleMusic, looping=1, volume=0.9)
def getBossBattleTaunt(self):
return TTLocalizer.BattleBldgBossTaunt
def disable(self):
DistributedBattleBase.DistributedBattleBase.disable(self)
self.battleMusic.stop()
def delete(self):
DistributedBattleBase.DistributedBattleBase.delete(self)
del self.battleMusic
def buildJoinPointList(self, avPos, destPos, toon = 0):
return []
def __faceOff(self, ts, name, callback):
if len(self.suits) == 0:
self.notify.warning('__faceOff(): no suits.')
return
if len(self.toons) == 0:
self.notify.warning('__faceOff(): no toons.')
return
elevatorPos = self.toons[0].getPos()
if len(self.suits) == 1:
leaderIndex = 0
elif self.bossBattle == 1:
leaderIndex = 1
else:
maxTypeNum = -1
for suit in self.suits:
suitTypeNum = SuitDNA.getSuitType(suit.dna.name)
if maxTypeNum < suitTypeNum:
maxTypeNum = suitTypeNum
leaderIndex = self.suits.index(suit)
delay = FACEOFF_TAUNT_T
suitTrack = Parallel()
suitLeader = None
for suit in self.suits:
suit.setState('Battle')
suitIsLeader = 0
oneSuitTrack = Sequence()
oneSuitTrack.append(Func(suit.loop, 'neutral'))
oneSuitTrack.append(Func(suit.headsUp, elevatorPos))
if self.suits.index(suit) == leaderIndex:
suitLeader = suit
suitIsLeader = 1
if self.bossBattle == 1:
taunt = self.getBossBattleTaunt()
else:
taunt = SuitBattleGlobals.getFaceoffTaunt(suit.getStyleName(), suit.doId)
oneSuitTrack.append(Func(suit.setChatAbsolute, taunt, CFSpeech | CFTimeout))
destPos, destHpr = self.getActorPosHpr(suit, self.suits)
oneSuitTrack.append(Wait(delay))
if suitIsLeader == 1:
oneSuitTrack.append(Func(suit.clearChat))
oneSuitTrack.append(self.createAdjustInterval(suit, destPos, destHpr))
suitTrack.append(oneSuitTrack)
toonTrack = Parallel()
for toon in self.toons:
oneToonTrack = Sequence()
destPos, destHpr = self.getActorPosHpr(toon, self.toons)
oneToonTrack.append(Wait(delay))
oneToonTrack.append(self.createAdjustInterval(toon, destPos, destHpr, toon=1, run=1))
toonTrack.append(oneToonTrack)
camTrack = Sequence()
def setCamFov(fov):
base.camLens.setFov(fov)
camTrack.append(Func(camera.wrtReparentTo, suitLeader))
camTrack.append(Func(setCamFov, self.camFOFov))
suitHeight = suitLeader.getHeight()
suitOffsetPnt = Point3(0, 0, suitHeight)
MidTauntCamHeight = suitHeight * 0.66
MidTauntCamHeightLim = suitHeight - 1.8
if MidTauntCamHeight < MidTauntCamHeightLim:
MidTauntCamHeight = MidTauntCamHeightLim
TauntCamY = 18
TauntCamX = 0
TauntCamHeight = random.choice((MidTauntCamHeight, 1, 11))
camTrack.append(Func(camera.setPos, TauntCamX, TauntCamY, TauntCamHeight))
camTrack.append(Func(camera.lookAt, suitLeader, suitOffsetPnt))
camTrack.append(Wait(delay))
camPos = Point3(0, -6, 4)
camHpr = Vec3(0, 0, 0)
camTrack.append(Func(camera.reparentTo, base.localAvatar))
camTrack.append(Func(setCamFov, ToontownGlobals.DefaultCameraFov))
camTrack.append(Func(camera.setPosHpr, camPos, camHpr))
mtrack = Parallel(suitTrack, toonTrack, camTrack)
done = Func(callback)
track = Sequence(mtrack, done, name=name)
track.start(ts)
self.storeInterval(track, name)
return
def enterFaceOff(self, ts):
if len(self.toons) > 0 and base.localAvatar == self.toons[0]:
Emote.globalEmote.disableAll(self.toons[0], 'dbattlebldg, enterFaceOff')
self.delayDeleteMembers()
self.__faceOff(ts, self.faceOffName, self.__handleFaceOffDone)
return None
def __handleFaceOffDone(self):
self.notify.debug('FaceOff done')
self.d_faceOffDone(base.localAvatar.doId)
def exitFaceOff(self):
self.notify.debug('exitFaceOff()')
if len(self.toons) > 0 and base.localAvatar == self.toons[0]:
Emote.globalEmote.releaseAll(self.toons[0], 'dbattlebldg exitFaceOff')
self.clearInterval(self.faceOffName)
self._removeMembersKeep()
camera.wrtReparentTo(self)
base.camLens.setFov(self.camFov)
return None
def __playReward(self, ts, callback):
toonTracks = Parallel()
for toon in self.toons:
toonTracks.append(Sequence(Func(toon.loop, 'victory'), Wait(FLOOR_REWARD_TIMEOUT), Func(toon.loop, 'neutral')))
name = self.uniqueName('floorReward')
track = Sequence(toonTracks, Func(callback), name=name)
camera.setPos(0, 0, 1)
camera.setHpr(180, 10, 0)
self.storeInterval(track, name)
track.start(ts)
def enterReward(self, ts):
self.notify.debug('enterReward()')
self.delayDeleteMembers()
self.__playReward(ts, self.__handleFloorRewardDone)
return None
def __handleFloorRewardDone(self):
return None
def exitReward(self):
self.notify.debug('exitReward()')
self.clearInterval(self.uniqueName('floorReward'))
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
for toon in self.toons:
toon.startSmooth()
return None
def enterBuildingReward(self, ts):
self.delayDeleteMembers()
if self.hasLocalToon():
NametagGlobals.setMasterArrowsOn(0)
self.movie.playReward(ts, self.uniqueName('building-reward'), self.__handleBuildingRewardDone, noSkip=True)
return None
def __handleBuildingRewardDone(self):
if self.hasLocalToon():
self.d_rewardDone(base.localAvatar.doId)
self.movie.resetReward()
self.fsm.request('Resume')
def exitBuildingReward(self):
self.movie.resetReward(finish=1)
self._removeMembersKeep()
NametagGlobals.setMasterArrowsOn(1)
return None
def enterResume(self, ts = 0):
if self.hasLocalToon():
self.removeLocalToon()
return None
def exitResume(self):
return None
|
|
# Filename: daq.py
# pylint: disable=R0903
"""
Pumps for the DAQ data formats.
"""
from collections import namedtuple
from io import BytesIO
import json
import math
import struct
from struct import unpack
import time
import pprint
from urllib.request import urlopen
from urllib.error import URLError
import numpy as np
from thepipe import Module, Blob
from km3pipe.dataclasses import Table
from km3pipe.sys import ignored
from km3pipe.logger import get_logger, get_printer
__author__ = "Tamas Gal"
__copyright__ = "Copyright 2016, Tamas Gal and the KM3NeT collaboration."
__credits__ = []
__license__ = "MIT"
__maintainer__ = "Tamas Gal"
__email__ = "[email protected]"
__status__ = "Development"
log = get_logger(__name__) # pylint: disable=C0103
DATA_TYPES = {
101: "DAQSuperFrame",
201: "DAQSummaryFrame",
# Using the same class for all timeslices since they are structurally
# identical (until now)
1001: "DAQTimeslice", # Type of erroneous timeslice data
1002: "DAQTimeslice", # L0
1003: "DAQTimeslice", # L1
1004: "DAQTimeslice", # L2
1005: "DAQTimeslice", # SN
2001: "DAQSummaryslice",
10001: "DAQEvent",
}
MINIMAL_RATE_HZ = 2.0e3
MAXIMAL_RATE_HZ = 2.0e6
class TimesliceParser(Module):
"""Preliminary parser for DAQTimeslice"""
def configure(self):
self.legacy = self.get("legacy", default=False)
def _get_raw_data(self, blob):
if "CHPrefix" in blob:
if not str(blob["CHPrefix"].tag).startswith("IO_TS"):
log.info("Not an IO_TS* blob")
return blob
return BytesIO(blob["CHData"])
if "FileIO" in blob:
return blob["FileIO"]
if "RawBytes" in blob:
return BytesIO(blob["RawBytes"])
def process(self, blob):
data = self._get_raw_data(blob)
if data is None:
return blob
try:
ts_info, ts_frameinfos, ts_hits = self._parse_timeslice(data)
except struct.error:
log.error("Could not parse Timeslice")
log.error(blob.keys())
else:
blob["TSHits"] = ts_hits
blob["TimesliceInfo"] = ts_info
blob["TimesliceFrameInfos"] = ts_frameinfos
return blob
def _parse_timeslice(self, data):
tsl_size, datatype = unpack("<ii", data.read(8))
if not self.legacy:
version = unpack("<h", data.read(2))[0]
if version != 1:
raise ValueError(
"Unsupported DAQTimeslice version ({}) or legacy DAQ. "
"Make sure Jpp v13+ is used or pass 'legacy=True' "
"to {}.".format(version, self.__class__.__name__)
)
det_id, run, sqnr = unpack("<iii", data.read(12))
timestamp, ns_ticks, n_frames = unpack("<iii", data.read(12))
ts_info = Table.from_template(
{
"frame_index": sqnr,
"slice_id": 0,
"timestamp": timestamp,
"nanoseconds": ns_ticks * 16,
"n_frames": n_frames,
},
"TimesliceInfo",
)
ts_frameinfos = {}
_dom_ids = []
_channel_ids = []
_times = []
_tots = []
for _ in range(n_frames):
frame_size, datatype = unpack("<ii", data.read(8))
det_id, run, sqnr = unpack("<iii", data.read(12))
timestamp, ns_ticks, dom_id = unpack("<iii", data.read(12))
dataqueue_status = unpack("<i", data.read(4))[0]
dom_status = unpack("<iiii", data.read(4 * 4))
n_hits = unpack("<i", data.read(4))[0]
ts_frameinfos[dom_id] = Table.from_template(
{
"det_id": det_id,
"run_id": run,
"sqnr": sqnr,
"timestamp": timestamp,
"nanoseconds": ns_ticks * 16,
"dom_id": dom_id,
"dataqueue_status": dataqueue_status,
"dom_status": dom_status,
"n_hits": n_hits,
},
"TimesliceFrameInfo",
)
for j in range(n_hits):
hit = unpack("!BlB", data.read(6))
_dom_ids.append(dom_id)
_channel_ids.append(hit[0])
_times.append(hit[1])
_tots.append(hit[2])
ts_hits = Table(
{
"channel_id": np.array(_channel_ids),
"dom_id": np.array(_dom_ids),
"time": np.array(_times),
"tot": np.array(_tots),
},
name="TimesliceHits",
h5loc="/timeslice_hits",
split_h5=True,
)
return ts_info, ts_frameinfos, ts_hits
class RePump(Module):
"""A pump for binary DAQ files.
This pump can be used to replay raw dumps e.g. created with the ``daqsample``
    tool. It creates the same structures as the ``kp.io.ch.CHPump`` and is thus
    suited to testing online processing pipelines with offline files.
"""
def configure(self):
self.filename = self.require("filename")
self.fobj = open(self.filename, "rb")
def process(self, blob):
try:
length, data_type = unpack("<ii", self.fobj.read(8))
self.fobj.seek(-8, 1)
except struct.error:
raise StopIteration
data = self.fobj.read(length)
blob["RawBytes"] = data
return blob
def finish(self):
self.fobj.close()
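# A small sketch of replaying a raw DAQ dump with the pumps above, assuming
# ``thepipe.Pipeline`` is available (thepipe is already a dependency of this
# module) and that ``dump.dat`` is a placeholder file written e.g. by the
# ``daqsample`` tool.
def _example_replay_timeslices(filename="dump.dat"):
    from thepipe import Pipeline
    pipe = Pipeline()
    # RePump emits the raw bytes of each DAQ frame as blob["RawBytes"] ...
    pipe.attach(RePump, filename=filename)
    # ... which TimesliceParser turns into TimesliceInfo/TSHits tables.
    pipe.attach(TimesliceParser)
    pipe.drain(10)  # process the first ten frames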
class DAQPump(Module):
"""A pump for binary DAQ files. Deprecated!"""
def configure(self):
self.filename = self.require("filename")
self.legacy = self.get("legacy", default=False)
self.frame_positions = []
self.index = 0
self.blob_file = self.open_file(self.filename)
self.determine_frame_positions()
def next_blob(self):
"""Get the next frame from file"""
blob_file = self.blob_file
try:
preamble = DAQPreamble(file_obj=blob_file)
except struct.error:
raise StopIteration
try:
data_type = DATA_TYPES[preamble.data_type]
except KeyError:
log.error("Unknown datatype: {0}".format(preamble.data_type))
data_type = "Unknown"
blob = Blob()
blob[data_type] = None
blob["DAQPreamble"] = preamble
if data_type == "DAQSummaryslice":
daq_frame = DAQSummaryslice(blob_file, legacy=self.legacy)
blob[data_type] = daq_frame
blob["DAQHeader"] = daq_frame.header
elif data_type == "DAQEvent":
daq_frame = DAQEvent(blob_file, legacy=self.legacy)
blob[data_type] = daq_frame
blob["DAQHeader"] = daq_frame.header
else:
log.warning(
"Skipping DAQ frame with data type code '{0}'.".format(
preamble.data_type
)
)
blob_file.seek(preamble.length - DAQPreamble.size, 1)
return blob
def seek_to_frame(self, index):
"""Move file pointer to the frame with given index."""
pointer_position = self.frame_positions[index]
self.blob_file.seek(pointer_position, 0)
def get_blob(self, index):
"""Return blob at given index."""
self.seek_to_frame(index)
return self.next_blob()
def determine_frame_positions(self):
"""Record the file pointer position of each frame"""
self.blob_file.seek(0, 0)
with ignored(struct.error):
while True:
pointer_position = self.blob_file.tell()
length = struct.unpack("<i", self.blob_file.read(4))[0]
self.blob_file.seek(length - 4, 1)
self.frame_positions.append(pointer_position)
self.blob_file.seek(0, 0)
log.info("Found {0} frames.".format(len(self.frame_positions)))
def process(self, blob):
"""Pump the next blob to the modules"""
return self.next_blob()
def finish(self):
"""Clean everything up"""
self.blob_file.close()
def __len__(self):
if not self.frame_positions:
self.determine_frame_positions()
return len(self.frame_positions)
def __iter__(self):
return self
def __next__(self):
try:
blob = self.get_blob(self.index)
except IndexError:
self.index = 0
raise StopIteration
self.index += 1
return blob
def __getitem__(self, index):
if isinstance(index, int):
return self.get_blob(index)
elif isinstance(index, slice):
return self._slice_generator(index)
else:
raise TypeError("index must be int or slice")
def _slice_generator(self, index):
"""A simple slice generator for iterations"""
start, stop, step = index.indices(len(self))
for i in range(start, stop, step):
yield self.get_blob(i)
class DAQProcessor(Module):
def configure(self):
self.legacy = self.get("legacy", default=False)
self.index = 0
self.event_id = 0
def process(self, blob):
tag = str(blob["CHPrefix"].tag)
data = blob["CHData"]
processor = None
if tag == "IO_EVT":
processor = self.process_event
if tag == "IO_SUM":
processor = self.process_summaryslice
if tag == "IO_OLINE":
processor = self.process_online_reco
if processor is None:
self.log.error("Unsupported tag: %s", tag)
return
try:
processor(data, blob)
except (struct.error, ValueError) as e:
self.log.error("Corrupt data received. Skipping...\n" "Error: %s", e)
return
return blob
def process_event(self, data, blob):
data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
event = DAQEvent(file_obj=data_io, legacy=self.legacy)
header = event.header
event_info = Table.from_template(
{
"det_id": header.det_id,
# 'frame_index': self.index, # header.time_slice,
"frame_index": header.time_slice,
"livetime_sec": 0,
"mc_id": 0,
"mc_t": 0,
"n_events_gen": 0,
"n_files_gen": 0,
"overlays": event.overlays,
"trigger_counter": event.trigger_counter,
"trigger_mask": event.trigger_mask,
"utc_nanoseconds": header.ticks * 16,
"utc_seconds": header.time_stamp,
"weight_w1": 0,
"weight_w2": 0,
"weight_w3": 0, # MC weights
"run_id": header.run, # run id
"group_id": self.event_id,
},
"EventInfo",
)
blob["EventInfo"] = event_info
self.event_id += 1
self.index += 1
hits = event.snapshot_hits
n_hits = event.n_snapshot_hits
if n_hits == 0:
self.log.warning("No hits found in event.")
return
dom_ids, channel_ids, times, tots = zip(*hits)
triggereds = np.zeros(n_hits)
triggered_map = {}
for triggered_hit in event.triggered_hits:
dom_id, pmt_id, time, tot, _ = triggered_hit
triggered_map[(dom_id, pmt_id, time, tot)] = True
for idx, hit in enumerate(hits):
triggereds[idx] = hit in triggered_map
hit_series = Table.from_template(
{
"channel_id": channel_ids,
"dom_id": dom_ids,
"time": times,
"tot": tots,
"triggered": triggereds,
"group_id": self.event_id,
},
"Hits",
)
blob["Hits"] = hit_series
def process_summaryslice(self, data, blob):
data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
summaryslice = DAQSummaryslice(file_obj=data_io, legacy=self.legacy)
blob["RawSummaryslice"] = summaryslice
def process_online_reco(self, data, blob):
data_io = BytesIO(data)
preamble = DAQPreamble(file_obj=data_io) # noqa
_data = unpack("<iiiQI", data_io.read(4 + 4 + 4 + 8 + 4))
det_id, run_id, frame_index, trigger_counter, utc_seconds = _data
shower_reco = unpack("9d", data_io.read(9 * 8))
shower_meta = unpack("3i", data_io.read(12))
track_reco = unpack("9d", data_io.read(9 * 8))
track_meta = unpack("3i", data_io.read(12))
print(
"Shower: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", shower_reco, shower_meta
)
print("Track: x/y/z/dx/dy/dz/E/Q/t (type/status/ndf): ", track_reco, track_meta)
blob["ReconstructionInfo"] = Table(
{
"det_id": det_id,
"run_id": run_id,
"frame_index": frame_index,
"trigger_counter": trigger_counter,
"utc_seconds": utc_seconds,
},
h5loc="reco",
split_h5=True,
name="Reconstructions",
)
args = track_reco + track_meta
blob["RecoTrack"] = RecoTrack(*args)
args = shower_reco + shower_meta
blob["RecoShower"] = RecoShower(*args)
RecoTrack = namedtuple("RecoTrack", "x y z dx dy dz E Q t type status ndf")
RecoShower = namedtuple("RecoShower", "x y z dx dy dz E Q t type status ndf")
class DAQPreamble(object):
"""Wrapper for the JDAQPreamble binary format.
Parameters
----------
byte_data : bytes (optional)
The binary file, where the file pointer is at the beginning of the header.
file_obj : file (optional)
The binary file, where the file pointer is at the beginning of the header.
Attributes
----------
size : int
The size of the original DAQ byte representation.
data_type : int
The data type of the following frame. The coding is stored in the
``DATA_TYPES``.
"""
size = 8
def __init__(self, byte_data=None, file_obj=None):
self.length = None
self.data_type = None
if byte_data:
self._parse_byte_data(byte_data)
if file_obj:
self._parse_file(file_obj)
def _parse_byte_data(self, byte_data):
"""Extract the values from byte string."""
self.length, self.data_type = unpack("<ii", byte_data[: self.size])
def _parse_file(self, file_obj):
"""Directly read from file handler.
Note that this will move the file pointer.
"""
byte_data = file_obj.read(self.size)
self._parse_byte_data(byte_data)
def __repr__(self):
description = "Length: {0}\nDataType: {1}".format(self.length, self.data_type)
return description
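# A short self-contained sketch of the preamble layout parsed above: two
# little-endian int32s (length, data_type), matching ``_parse_byte_data``.
def _example_preamble_from_bytes():
    raw = struct.pack("<ii", 96, 10001)  # length=96, data_type=10001 (DAQEvent)
    preamble = DAQPreamble(byte_data=raw)
    assert preamble.length == 96
    assert DATA_TYPES[preamble.data_type] == "DAQEvent"
    return preamble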
class DAQHeader(object):
"""Wrapper for the JDAQHeader binary format.
Parameters
----------
byte_data : bytes (optional)
The binary file, where the file pointer is at the beginning of the header.
file_obj : file (optional)
The binary file, where the file pointer is at the beginning of the header.
Attributes
----------
size : int
The size of the original DAQ byte representation.
"""
size = 20
def __init__(self, byte_data=None, file_obj=None):
self.run = None
self.time_slice = None
self.time_stamp = None
if byte_data:
self._parse_byte_data(byte_data)
if file_obj:
self._parse_file(file_obj)
def _parse_byte_data(self, byte_data):
"""Extract the values from byte string."""
chunks = unpack("<iiiii", byte_data[: self.size])
det_id, run, time_slice, time_stamp, ticks = chunks
self.det_id = det_id
self.run = run
self.time_slice = time_slice
self.time_stamp = time_stamp
self.ticks = ticks
def _parse_file(self, file_obj):
"""Directly read from file handler.
Note:
This will move the file pointer.
"""
byte_data = file_obj.read(self.size)
self._parse_byte_data(byte_data)
def __repr__(self):
description = "Run: {0}\nTime slice: {1}\nTime stamp: {2} ({3})".format(
self.run, self.time_slice, self.time_stamp, self.ticks
)
return description
class DAQSummaryslice(object):
"""Wrapper for the JDAQSummarySlice binary format.
Parameters
----------
file_obj : file (optional)
The binary file, where the file pointer is at the beginning of the header.
Attributes
----------
n_summary_frames : int
The number of summary frames.
summary_frames : dict
The PMT rates for each DOM. The key is the DOM identifier and the
corresponding value is a sorted list of PMT rates in [Hz].
dom_rates : dict
The overall DOM rate for each DOM.
"""
def __init__(self, file_obj, legacy=False):
if not legacy:
version = unpack("<h", file_obj.read(2))[0]
if version != 6:
raise ValueError(
"Unsupported {} version ({}) or legacy DAQ. "
"Make sure Jpp v13+ is used or pass 'legacy=True' "
"to the init.".format(self.__class__.__name__, version)
)
self.header = DAQHeader(file_obj=file_obj)
self.n_summary_frames = unpack("<i", file_obj.read(4))[0]
self.summary_frames = {}
self.dq_status = {}
self.dom_status = {}
self.dom_rates = {}
self._parse_summary_frames(file_obj)
def _parse_summary_frames(self, file_obj):
"""Iterate through the byte data and fill the summary_frames"""
for _ in range(self.n_summary_frames):
dom_id = unpack("<i", file_obj.read(4))[0]
dq_status = file_obj.read(4) # probably dom status? # noqa
dom_status = unpack("<iiii", file_obj.read(16))
raw_rates = unpack("b" * 31, file_obj.read(31))
pmt_rates = [self._get_rate(value) for value in raw_rates]
self.summary_frames[dom_id] = pmt_rates
self.dq_status[dom_id] = dq_status
self.dom_status[dom_id] = dom_status
self.dom_rates[dom_id] = np.sum(pmt_rates)
def _get_rate(self, value):
"""Return the rate in Hz from the short int value"""
if value == 0:
return 0
else:
return MINIMAL_RATE_HZ * math.exp(value * self._get_factor())
def _get_factor(self):
return math.log(MAXIMAL_RATE_HZ / MINIMAL_RATE_HZ) / 255
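# The PMT rates above are stored as single bytes and expanded logarithmically:
# a value of 0 means "no rate", while non-zero values map onto the interval
# [MINIMAL_RATE_HZ, MAXIMAL_RATE_HZ], so that value=255 yields MAXIMAL_RATE_HZ.
# A tiny standalone sketch of the same mapping used in ``_get_rate`` above:
def _example_rate_decompression(value=255):
    factor = math.log(MAXIMAL_RATE_HZ / MINIMAL_RATE_HZ) / 255
    return 0 if value == 0 else MINIMAL_RATE_HZ * math.exp(value * factor)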
class DAQEvent(object):
"""Wrapper for the JDAQEvent binary format.
Parameters
----------
file_obj : file
The binary file, where the file pointer is at the beginning of the header.
Attributes
----------
trigger_counter : int
Incremental identifier of the occurred trigger.
trigger_mask : int
The trigger type(s) satisfied.
overlays : int
Number of merged events.
n_triggered_hits : int
Number of hits satisfying the trigger conditions.
n_snapshot_hits : int
Number of snapshot hits.
triggered_hits : list
A list of triggered hits (dom_id, pmt_id, tdc_time, tot, (trigger_mask,))
snapshot_hits : list
A list of snapshot hits (dom_id, pmt_id, tdc_time, tot)
"""
def __init__(self, file_obj, legacy=False):
if not legacy:
version = unpack("<h", file_obj.read(2))[0]
if version != 4:
raise ValueError(
"Unsupported {} version ({}) or legacy DAQ. "
"Make sure Jpp v13+ is used or pass 'legacy=True' "
"to the init.".format(self.__class__.__name__, version)
)
self.header = DAQHeader(file_obj=file_obj)
self.trigger_counter = unpack("<Q", file_obj.read(8))[0]
self.trigger_mask = unpack("<Q", file_obj.read(8))[0]
self.overlays = unpack("<i", file_obj.read(4))[0]
self.n_triggered_hits = unpack("<i", file_obj.read(4))[0]
self.triggered_hits = []
self._parse_triggered_hits(file_obj)
self.n_snapshot_hits = unpack("<i", file_obj.read(4))[0]
self.snapshot_hits = []
self._parse_snapshot_hits(file_obj)
def _parse_triggered_hits(self, file_obj):
"""Parse and store triggered hits."""
for _ in range(self.n_triggered_hits):
dom_id, pmt_id = unpack("<ib", file_obj.read(5))
tdc_time = unpack(">I", file_obj.read(4))[0]
tot = unpack("<b", file_obj.read(1))[0]
trigger_mask = unpack("<Q", file_obj.read(8))
self.triggered_hits.append((dom_id, pmt_id, tdc_time, tot, trigger_mask))
def _parse_snapshot_hits(self, file_obj):
"""Parse and store snapshot hits."""
for _ in range(self.n_snapshot_hits):
dom_id, pmt_id = unpack("<ib", file_obj.read(5))
tdc_time = unpack(">I", file_obj.read(4))[0]
tot = unpack("<b", file_obj.read(1))[0]
self.snapshot_hits.append((dom_id, pmt_id, tdc_time, tot))
def __repr__(self):
string = "\n".join(
(
" Number of triggered hits: " + str(self.n_triggered_hits),
" Number of snapshot hits: " + str(self.n_snapshot_hits),
)
)
string += "\nTriggered hits:\n"
string += pprint.pformat(self.triggered_hits)
string += "\nSnapshot hits:\n"
string += pprint.pformat(self.snapshot_hits)
return string
class TMCHData(object):
"""Monitoring Channel data."""
def __init__(self, file_obj, version=None):
f = file_obj
data_type = f.read(4)
if data_type != b"TMCH":
raise ValueError("Invalid datatype: {0}".format(data_type))
self.run = unpack(">I", f.read(4))[0]
self.udp_sequence_number = unpack(">I", f.read(4))[0]
self.utc_seconds = unpack(">I", f.read(4))[0]
self.nanoseconds = unpack(">I", f.read(4))[0] * 16
self.dom_id = unpack(">I", f.read(4))[0]
self.dom_status_0 = unpack(">I", f.read(4))[0]
self.dom_status_1 = unpack(">I", f.read(4))[0]
self.dom_status_2 = unpack(">I", f.read(4))[0]
self.dom_status_3 = unpack(">I", f.read(4))[0]
self.pmt_rates = [r * 10.0 for r in unpack(">" + 31 * "I", f.read(31 * 4))]
self.hrvbmp = unpack(">I", f.read(4))[0]
self.flags = unpack(">I", f.read(4))[0]
# flags:
# bit 0: AHRS valid
if version is None:
# bit 3-1: structure version
# 000 - 1, 001 - 2, 010 - unused, 011 - 3
            self.version = ((self.flags >> 1) & 7) + 1
else:
self.version = version
self.yaw, self.pitch, self.roll = unpack(">fff", f.read(12))
self.A = unpack(">fff", f.read(12)) # AHRS: Ax, Ay, Az
self.G = unpack(">fff", f.read(12)) # AHRS: Gx, Gy, Gz
self.H = unpack(">fff", f.read(12)) # AHRS: Hx, Hy, Hz
self.temp = unpack(">H", f.read(2))[0] / 100.0
self.humidity = unpack(">H", f.read(2))[0] / 100.0
self.tdcfull = unpack(">I", f.read(4))[0]
self.aesfull = unpack(">I", f.read(4))[0]
self.flushc = unpack(">I", f.read(4))[0]
if self.version >= 2:
self.ts_duration_ms = unpack(">I", f.read(4))[0]
if self.version >= 3:
self.tdc_supertime_fifo_size = unpack(">H", f.read(2))[0]
self.aes_supertime_fifo_size = unpack(">H", f.read(2))[0]
def __str__(self):
return str(vars(self))
def __repr__(self):
return self.__str__()
class TMCHRepump(Module):
"""Takes a IO_MONIT raw dump and replays it."""
def configure(self):
filename = self.require("filename")
self.format_version = self.get("format_version", default=None)
self.fobj = open(filename, "rb")
self.blobs = self.blob_generator()
def process(self, blob):
return next(self.blobs)
def blob_generator(self):
while True:
blob = Blob()
datatype = self.fobj.read(4)
if len(datatype) == 0:
return
if datatype == b"TMCH":
self.fobj.seek(-4, 1)
blob["TMCHData"] = TMCHData(self.fobj, version=self.format_version)
yield blob
def finish(self):
self.fobj.close()
def __iter__(self):
return self
def __next__(self):
return next(self.blobs)
class DMMonitor(object):
"""A class which provides access to the Detector Manager parameters.
Examples
--------
>>> import km3pipe as kp
>>> dmm = kp.io.daq.DMMonitor('192.168.0.120', base='clb/outparams')
>>> session = dmm.start_session('test', ['wr_mu/1/0','wr_mu/1/1'])
>>> for values in session:
print(values)
"""
def __init__(self, host, port=1302, base=""):
self._host = host
self._port = port
self._base = base
self._url = "http://{}:{}/mon/{}".format(self._host, self._port, self._base)
self._available_parameters = []
self.log = get_logger(self.__class__.__name__)
self.cprint = get_printer(self.__class__.__name__)
@property
def available_parameters(self):
if not self._available_parameters:
self._get_available_parameters()
return self._available_parameters
def _get_available_parameters(self):
self._available_parameters = json.loads(urlopen(self._url).read())
def get(self, path):
return json.loads(
urlopen(
"http://{}:{}/mon/{}/{}".format(
self._host, self._port, self._base, path
)
).read()
)
def start_session(self, name, paths, interval=10):
self.cprint("Starting session '{}'".format(name))
ret = urlopen(
"http://{}:{}/monshortdef?name={}&paths={}".format(
self._host,
self._port,
name,
",".join(["/mon/{}/{}".format(self._base, p) for p in paths]),
)
).read()
if ret != b"OK":
self.log.error("Could not start session")
return []
return self._session(name, interval)
def _session(self, name, interval):
url = "http://{}:{}/monshort/{}".format(self._host, self._port, name)
while True:
try:
yield json.loads(urlopen(url).read())
except URLError as e:
self.log.error(
"Error when trying to connect to the DM: %s\n"
"Retry in %d seconds..." % (e, interval)
)
time.sleep(interval)
def is_3dshower(trigger_mask):
return bool(trigger_mask & 2)
def is_mxshower(trigger_mask):
return bool(trigger_mask & 4)
def is_3dmuon(trigger_mask):
return bool(trigger_mask & 16)
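# A quick sketch of the trigger-mask helpers above: the mask is a bit field,
# so a single event can satisfy several triggers at once.
def _example_trigger_mask():
    trigger_mask = 2 | 16  # 3D shower and 3D muon bits set
    assert is_3dshower(trigger_mask)
    assert is_3dmuon(trigger_mask)
    assert not is_mxshower(trigger_mask)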
|
|
import numpy as np
from . import measure_moms
from . import acs_model_e as acs_model
from . import drizzle_position as dp
import os as os
from astropy.io import fits
from . import directories
def acs_determine_focus_metric( true, model ):
'''
    Return a chi-square-like goodness of fit between the measured ('true')
    ellipticities and the model ellipticities, normalised by the degrees
    of freedom.
'''
    dof = np.max([float(len(model.e1) - 1), 1])
goodness_of_fit = np.sum( (true['e1']-model.e1)**2+(true['e2']-model.e2)**2) /dof
return goodness_of_fit
# **************************************************************************
def acs_determine_focus( unknown_focus_image,
observed_moments_stars,
drizzle_file,
wavelength,
data_dir=None,
psf_model_dir=None,
pixel_scale=0.03,
r_match=600):
'''
;+
; NAME:
; ACS_DETERMINE_FOCUS
;
; CATEGORY:
; Reduction of ACS COSMOS data.
;
; PURPOSE:
; Decides the actual focus value of HST during observations, due to
; thermal fluctuations that change the distance between the primary
; and secondary mirrors. The offset from the nominal focus position
; is returned, in microns.
;
; INPUTS:
    ; moms : Measured catalogue of moments from the image
;
; OPTIONAL INPUTS:
; CATALOGUES - Structure containing model TinyTim PSF catalogues. Can
    ; be set for speed. If not set, read from disk on first run.
;
; OUTPUTS:
; Focus
;
; KEYWORDS:
; SINGLE : If set work out the focus for a single exposure
; this should be set to the name of the single exp
;
; EXAMPLE USE:
; acs_determine_focus, cluster, filter, results
;
; MODIFICATION HISTORY:
; Feb 05 - All shape moments needed by RRG used, by RM.
; Jan 05 - Written by Richard Massey.
; Aug 13 - Changed by DRH for single exposure
    ; Sep 19 - Don't use the positions from the single image
;-
'''
dirs = directories.return_dirs()
    # Now I need to measure the moments of the stars in the individual image.
    # The x and y positions here are in the frame of the drizzled image,
    # so they need to be converted to positions in the new frame.
image_name = unknown_focus_image.split('/')[-1][0:8]
inframe_stars = observed_moments_stars[observed_moments_stars[image_name+'_INFRAME'] == 1]
if not os.path.isfile( unknown_focus_image[:-5]+'_uncor.cat'):
measure_moms( unknown_focus_image, 'NOCAT_NEED',
unknown_focus_image[:-5]+'_uncor.cat',
object_catalogue=inframe_stars,
xGal=inframe_stars[image_name+'_X_IMAGE'],
yGal=inframe_stars[image_name+'_Y_IMAGE'],
mult=2, min_rad=6, quiet=True)
star_moments = fits.open( unknown_focus_image[:-5]+'_uncor.cat' )[1].data
#Now get an array of moments for all the possible focus positions
model_e, focus = acs_model.acs_model_e(star_moments[image_name+'_X_IMAGE'],
star_moments[image_name+'_Y_IMAGE'],
wavelength=wavelength )
#Number of focus positions
n_focus, nobjects = model_e.xx.shape
focus = np.arange(16)-10
average_distance = np.sum(model_e.offset_model, axis=0)/n_focus
# Select only those stars with a suitably close model (since we're not interpolating)
close_match = np.arange(nobjects)[average_distance < r_match]
n_stars=len(close_match)
if n_stars < 2:
print("No stars found in field ")
global_focus=0.
global_focus_error=100.
focus = input("No stars are found so please either input focus or cancel : ")
# Tabulate model ellipticities
model_e1=model_e.e1[:,close_match]
model_e2=model_e.e2[:,close_match]
# Tabulate measured ellipticities
true = star_moments[close_match]
#Find best-fit focus
best_fit_focus_indiv = np.zeros( (2, n_stars ))
for i in range(len(close_match)):
chisq=np.zeros(n_focus) #Absolute chi squared
for f in range(n_focus):
#Tabulate model ellipticities
model = moments( 1 )
model['e1'][0] = model_e.e1[f,close_match][i]
model['e2'][0] = model_e.e2[f,close_match][i]
model['x'][0] = model_e.x[close_match][i]
model['y'][0] = model_e.y[close_match][i]
model['xx'][0] = model_e.xx[f,close_match][i]
model['xy'][0] = model_e.xy[f,close_match][i]
model['yy'][0] = model_e.yy[f,close_match][i]
model['xxxx'][0] = model_e.xxxx[f,close_match][i]
model['xxxy'][0] = model_e.xxxy[f,close_match][i]
model['xxyy'][0] = model_e.xxyy[f,close_match][i]
model['xyyy'][0] = model_e.xyyy[f,close_match][i]
model['yyyy'][0] = model_e.yyyy[f,close_match][i]
chisq[f]=acs_determine_focus_metric(true[i], model)
best_fit_focus_indiv[ 0, i ] = focus[ np.argmin( chisq )]
best_fit_focus_indiv[ 1, i ] = np.min(chisq)
if n_stars < 5:
new_best_fit_focus = best_fit_focus_indiv[0, np.argmin(best_fit_focus_indiv[1,:])]
        # Just for the purposes of the test! MAKE SURE I DELETE THIS
else:
best_fit_focus = np.median( best_fit_focus_indiv[0,:] )
#If the median is between two take the best fit out of those two
if np.round( best_fit_focus ) != best_fit_focus:
floor_chi = best_fit_focus_indiv[1, best_fit_focus_indiv[0,:] == \
np.floor( best_fit_focus )]
ceil_chi = best_fit_focus_indiv[1, best_fit_focus_indiv[0,:] == \
np.ceil( best_fit_focus )]
if (len(ceil_chi) == 0) | (len(floor_chi) == 0):
chi_weighted_mean = np.sum( best_fit_focus_indiv[0,:]/best_fit_focus_indiv[1,:])/\
np.sum(1./best_fit_focus_indiv[1,:])
new_best_fit_focus = np.round(chi_weighted_mean)
else:
min_floor_chi = np.min( floor_chi )
min_ceil_chi = np.min( ceil_chi )
focii = np.append( np.floor( best_fit_focus ), np.ceil( best_fit_focus ))
chi = np.append( min_floor_chi, min_ceil_chi )
new_best_fit_focus = focii[ np.argmin( chi ) ]
else:
return best_fit_focus
#Store the best one
return new_best_fit_focus
class moments( dict ):
def __init__( self, n_objects ):
self.__dict__['x'] = np.zeros(n_objects)
self.__dict__['y'] = np.zeros(n_objects)
self.__dict__['e1'] = np.zeros(n_objects)
self.__dict__['e2'] = np.zeros(n_objects)
self.__dict__['xx'] = np.zeros(n_objects)
self.__dict__['xy'] = np.zeros(n_objects)
self.__dict__['yy'] = np.zeros(n_objects)
self.__dict__['xxxx'] = np.zeros(n_objects)
self.__dict__['xxxy'] = np.zeros(n_objects)
self.__dict__['xxyy'] = np.zeros(n_objects)
self.__dict__['xyyy'] = np.zeros(n_objects)
self.__dict__['yyyy'] = np.zeros(n_objects)
def keys(self):
return list(self.__dict__.keys())
def __getitem__(self, key):
return self.__dict__[key]
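# A minimal sketch of how the ``moments`` container and the goodness-of-fit
# metric above fit together; the ellipticity values are made up for
# illustration only.
def _example_focus_metric():
    model = moments(2)
    model['e1'][:] = [0.01, -0.02]
    model['e2'][:] = [0.00, 0.03]
    true = moments(2)
    true['e1'][:] = [0.02, -0.01]
    true['e2'][:] = [0.01, 0.02]
    # Sum of squared ellipticity residuals divided by the degrees of freedom.
    return acs_determine_focus_metric(true, model)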
|
|
import mimetypes
import os.path
import re
import cloudstorage
from django.http import Http404
from django.urls import reverse
from django.utils import safestring
from django.utils import text
from django.utils import timezone
from google.appengine.api import app_identity
from google.appengine.ext import ndb
from . import utils
BUCKET_KEY = 'CLOUD_STORAGE_BUCKET'
language_choices = [(name, name) for name in utils.get_language_names()]
class LexerConfig(ndb.Model):
"""Global config for customising what highlighting to use per file type."""
class mapping(ndb.Model):
extension = ndb.StringProperty(required=True)
language = ndb.StringProperty(required=True)
lexers = ndb.LocalStructuredProperty(mapping, repeated=True)
@classmethod
def get(cls):
"""Singleton method to get/create the configuration objects."""
return cls.get_or_insert('config', lexers=[])
@classmethod
def get_config(cls):
"""Singleton method to get a map of extensions to languages."""
config = cls.get()
mapping = {obj.extension: obj.language for obj in config.lexers}
return mapping
def make_name_for_storage(paste_id, filename, n, dt):
"""Returns a name for an object in Cloud Storage (without a bucket)."""
    # Like 'pasty/2016/03/01/1234567890/1/setup.py'.
filename = os.path.normpath(filename)
filename = text.get_valid_filename(filename)
template = u'pasty/{dt:%Y/%m/%d}/{id}/{n}/{filename}'
name = template.format(dt=dt, id=paste_id, n=n, filename=filename)
# UTF-8 is valid, but the SDK stub can't handle non-ASCII characters.
name = name.encode('utf-8')
return name
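# A small sketch of the storage-name layout produced above; the Django
# utilities it relies on are already imported at module level, and the
# arguments are placeholders.
def _example_storage_name():
    import datetime
    # Returns b'pasty/2016/03/01/1234567890/1/setup.py' (UTF-8 encoded).
    return make_name_for_storage(
        1234567890, u'setup.py', 1, datetime.datetime(2016, 3, 1))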
def make_relative_path(path):
"""Returns the path for a file, relative to the paste ID.
>>> make_relative_path('pasty/1999/12/31/123/1/foo.html')
'1/foo.html'
"""
match = re.search(r'^pasty/\d{4}/\d{1,2}/\d{1,2}/\d+/(.*)', path)
if match:
return match.group(1)
else:
raise ValueError('Invalid file path')
class PastyFile(ndb.Model):
DEFAULT_CONTENT_TYPE = 'text/plain'
DEFAULT_FILENAME = u'untitled.txt'
created = ndb.DateTimeProperty(auto_now_add=True)
filename = ndb.StringProperty(required=True)
path = ndb.StringProperty()
relative_path = ndb.StringProperty()
num_lines = ndb.IntegerProperty(default=0)
def content_highlight(self):
"""Returns the file content with syntax highlighting."""
with self.open('r') as fh:
text = fh.read()
config = LexerConfig.get_config()
_, markup = utils.highlight_content(text, filename=self.filename, config=config)
return safestring.mark_safe(markup)
def bucket_path(self):
bucket = app_identity.get_default_gcs_bucket_name()
return '/%s/%s' % (bucket, self.path)
@classmethod
def create(cls, filename, content, path, relative_path, num_lines):
"""Save the content to cloud storage and return a new PastyFile."""
if isinstance(content, unicode):
content = content.encode('utf-8')
pfile = cls(
filename=filename, path=path, relative_path=relative_path,
num_lines=num_lines)
with pfile.open('w') as fh:
fh.write(content)
return pfile
@ndb.ComputedProperty
def content_type(self):
filename = self.filename or ''
content_type, _ = mimetypes.guess_type(filename)
return content_type or self.DEFAULT_CONTENT_TYPE
def open(self, mode='r'):
path = self.bucket_path()
return cloudstorage.open(path, mode)
class Paste(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
author = ndb.StringProperty()
filename = ndb.StringProperty(required=True, default=PastyFile.DEFAULT_FILENAME)
description = ndb.StringProperty()
fork = ndb.KeyProperty(kind='Paste')
files = ndb.LocalStructuredProperty(PastyFile, repeated=True)
preview = ndb.TextProperty()
def __unicode__(self):
author = self.author if self.author else u'anonymous'
return u'%s / %s' % (author, self.filename)
@classmethod
def create_with_files(cls, files, **kwargs):
"""Creates a new Paste and saves files in storage."""
fork = kwargs.get('fork')
if fork:
kwargs['fork'] = fork.key
# OK. We need to create the Paste, then we can save the files to storage
# (because the storage name includes the paste's ID), then we update
# the paste again with the saved files' information.
paste = Paste(**kwargs)
paste.put()
right_now = timezone.now()
paste_id = paste.key.id()
config = LexerConfig.get_config()
# files is a sequence of (filename, content) pairs. But filename can
# be '', in which case we choose a name based on the content's format
# (e.g. if it looks like CSS, we choose 'untitled.css').
for n, filename_content in enumerate(files, 1):
filename, content = filename_content
# If no filename, we pick one.
if not filename:
lexer = utils.choose_lexer(content)
ext = utils.ext_for_lexer(lexer)
filename = PastyFile.DEFAULT_FILENAME.replace('.txt', ext)
num_lines = utils.count_lines(content)
path = make_name_for_storage(paste_id, filename, n, right_now)
relative_path = make_relative_path(path)
pfile = PastyFile.create(
filename=filename, content=content, path=path,
num_lines=num_lines, relative_path=relative_path)
paste.files.append(pfile)
if files:
# The first file is used to set the paste's own filename and
# preview.
fname = paste.files[0].filename
_, content = files[0]
_, preview = utils.summarize_content(content, filename=fname, config=config)
paste.preview = preview
paste.filename = fname
paste.put()
return paste
@classmethod
def get_or_404(cls, paste_id):
"""Returns a paste object. Raises Http404 if the paste_id is invalid."""
try:
paste_id = int(paste_id)
except (ValueError, TypeError):
raise Http404
paste = cls.get_by_id(paste_id)
if not paste:
raise Http404
return paste
@ndb.ComputedProperty
def num_files(self):
return len(self.files)
@ndb.ComputedProperty
def num_lines(self):
return sum(pasty_file.num_lines for pasty_file in self.files)
@property
def url(self):
url = reverse('paste_detail', args=[self.key.id()])
return url
def to_dict(self):
# Avoid problems when JSON-ifying a forked paste.
obj = super(Paste, self).to_dict()
obj['id'] = self.key.id()
obj['url'] = self.url
if obj['fork']:
obj['fork'] = obj['fork'].id()
return obj
def create_star_for_author(self, author):
"""Helper to get/create a Star for this paste."""
return Star.create(author, self)
class Star(ndb.Model):
created = ndb.DateTimeProperty(auto_now_add=True)
author = ndb.StringProperty(indexed=True)
paste = ndb.KeyProperty(Paste)
@classmethod
def create(self, author, paste):
# We construct the star id ourselves so that if you star something
# twice it doesn't create multiple stars for the same paste.
star_id = u'%s/%s' % (author, paste.key.id())
star = Star.get_or_insert(star_id, author=author, paste=paste.key)
return star
class Peeling(ndb.Model):
"""Legacy model for converting old peelings to new pastes."""
@classmethod
def _get_kind(cls):
return 'pastes_paste'
def get_starred_pastes(email):
"""Returns pastes starred by a user, ordered by when the paste was starred."""
stars = Star.query().filter(Star.author==email).order(-Star.created).fetch(100)
keys = [star.paste for star in stars]
pastes = [key.get() for key in keys]
pastes.sort(key=lambda x: keys.index(x.key))
return pastes
|
|
import itertools
import numpy as np
from numpy import exp
from numpy.testing import assert_, assert_equal
from scipy.optimize import root
def test_performance():
# Compare performance results to those listed in
# [Cheng & Li, IMA J. Num. An. 29, 814 (2008)]
# and
# [W. La Cruz, J.M. Martinez, M. Raydan, Math. Comp. 75, 1429 (2006)].
# and those produced by dfsane.f from M. Raydan's website.
#
# Where the results disagree, the largest limits are taken.
e_a = 1e-5
e_r = 1e-4
table_1 = [
dict(F=F_1, x0=x0_1, n=1000, nit=5, nfev=5),
dict(F=F_1, x0=x0_1, n=10000, nit=2, nfev=2),
dict(F=F_2, x0=x0_2, n=500, nit=11, nfev=11),
dict(F=F_2, x0=x0_2, n=2000, nit=11, nfev=11),
# dict(F=F_4, x0=x0_4, n=999, nit=243, nfev=1188), removed: too sensitive to rounding errors
dict(F=F_6, x0=x0_6, n=100, nit=6, nfev=6), # Results from dfsane.f; papers list nit=3, nfev=3
dict(F=F_7, x0=x0_7, n=99, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_7, x0=x0_7, n=999, nit=23, nfev=29), # Must have n%3==0, typo in papers?
dict(F=F_9, x0=x0_9, n=100, nit=12, nfev=18), # Results from dfsane.f; papers list nit=nfev=6?
dict(F=F_9, x0=x0_9, n=1000, nit=12, nfev=18),
dict(F=F_10, x0=x0_10, n=1000, nit=5, nfev=5), # Results from dfsane.f; papers list nit=2, nfev=12
]
# Check also scaling invariance
for xscale, yscale, line_search in itertools.product([1.0, 1e-10, 1e10], [1.0, 1e-10, 1e10],
['cruz', 'cheng']):
for problem in table_1:
n = problem['n']
func = lambda x, n: yscale*problem['F'](x/xscale, n)
args = (n,)
x0 = problem['x0'](n) * xscale
fatol = np.sqrt(n) * e_a * yscale + e_r * np.linalg.norm(func(x0, n))
sigma_eps = 1e-10 * min(yscale/xscale, xscale/yscale)
sigma_0 = xscale/yscale
with np.errstate(over='ignore'):
sol = root(func, x0, args=args,
options=dict(ftol=0, fatol=fatol, maxfev=problem['nfev'] + 1,
sigma_0=sigma_0, sigma_eps=sigma_eps,
line_search=line_search),
method='DF-SANE')
err_msg = repr([xscale, yscale, line_search, problem, np.linalg.norm(func(sol.x, n)),
fatol, sol.success, sol.nit, sol.nfev])
assert_(sol.success, err_msg)
assert_(sol.nfev <= problem['nfev'] + 1, err_msg) # nfev+1: dfsane.f doesn't count first eval
assert_(sol.nit <= problem['nit'], err_msg)
assert_(np.linalg.norm(func(sol.x, n)) <= fatol, err_msg)
def test_complex():
def func(z):
return z**2 - 1 + 2j
x0 = 2.0j
ftol = 1e-4
sol = root(func, x0, tol=ftol, method='DF-SANE')
assert_(sol.success)
f0 = np.linalg.norm(func(x0))
fx = np.linalg.norm(func(sol.x))
assert_(fx <= ftol*f0)
def test_linear_definite():
# The DF-SANE paper proves convergence for "strongly isolated"
# solutions.
#
# For linear systems F(x) = A x - b = 0, with A positive or
# negative definite, the solution is strongly isolated.
def check_solvability(A, b, line_search='cruz'):
func = lambda x: A.dot(x) - b
xp = np.linalg.solve(A, b)
eps = np.linalg.norm(func(xp)) * 1e3
sol = root(func, b, options=dict(fatol=eps, ftol=0, maxfev=17523, line_search=line_search),
method='DF-SANE')
assert_(sol.success)
assert_(np.linalg.norm(func(sol.x)) <= eps)
n = 90
# Test linear pos.def. system
np.random.seed(1234)
A = np.arange(n*n).reshape(n, n)
A = A + n*n * np.diag(1 + np.arange(n))
assert_(np.linalg.eigvals(A).min() > 0)
b = np.arange(n) * 1.0
check_solvability(A, b, 'cruz')
check_solvability(A, b, 'cheng')
# Test linear neg.def. system
check_solvability(-A, b, 'cruz')
check_solvability(-A, b, 'cheng')
def test_shape():
def f(x, arg):
return x - arg
for dt in [float, complex]:
x = np.zeros([2,2])
arg = np.ones([2,2], dtype=dt)
sol = root(f, x, args=(arg,), method='DF-SANE')
assert_(sol.success)
assert_equal(sol.x.shape, x.shape)
# Some of the test functions and initial guesses listed in
# [W. La Cruz, M. Raydan. Optimization Methods and Software, 18, 583 (2003)]
def F_1(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0] - 1) - 1
g[1:] = i*(exp(x[1:] - 1) - x[1:])
return g
def x0_1(n):
x0 = np.empty([n])
x0.fill(n/(n-1))
return x0
def F_2(x, n):
g = np.zeros([n])
i = np.arange(2, n+1)
g[0] = exp(x[0]) - 1
g[1:] = 0.1*i*(exp(x[1:]) + x[:-1] - 1)
return g
def x0_2(n):
x0 = np.empty([n])
x0.fill(1/n**2)
return x0
def F_4(x, n):
assert_equal(n % 3, 0)
g = np.zeros([n])
# Note: the first line is typoed in some of the references;
# correct in original [Gasparo, Optimization Meth. 13, 79 (2000)]
g[::3] = 0.6 * x[::3] + 1.6 * x[1::3]**3 - 7.2 * x[1::3]**2 + 9.6 * x[1::3] - 4.8
g[1::3] = 0.48 * x[::3] - 0.72 * x[1::3]**3 + 3.24 * x[1::3]**2 - 4.32 * x[1::3] - x[2::3] + 0.2 * x[2::3]**3 + 2.16
g[2::3] = 1.25 * x[2::3] - 0.25*x[2::3]**3
return g
def x0_4(n):
assert_equal(n % 3, 0)
x0 = np.array([-1, 1/2, -1] * (n//3))
return x0
def F_6(x, n):
c = 0.9
mu = (np.arange(1, n+1) - 0.5)/n
return x - 1/(1 - c/(2*n) * (mu[:,None]*x / (mu[:,None] + mu)).sum(axis=1))
def x0_6(n):
return np.ones([n])
def F_7(x, n):
assert_equal(n % 3, 0)
def phi(t):
v = 0.5*t - 2
v[t > -1] = ((-592*t**3 + 888*t**2 + 4551*t - 1924)/1998)[t > -1]
v[t >= 2] = (0.5*t + 2)[t >= 2]
return v
g = np.zeros([n])
g[::3] = 1e4 * x[1::3]**2 - 1
g[1::3] = exp(-x[::3]) + exp(-x[1::3]) - 1.0001
g[2::3] = phi(x[2::3])
return g
def x0_7(n):
assert_equal(n % 3, 0)
return np.array([1e-3, 18, 1] * (n//3))
def F_9(x, n):
g = np.zeros([n])
i = np.arange(2, n)
g[0] = x[0]**3/3 + x[1]**2/2
g[1:-1] = -x[1:-1]**2/2 + i*x[1:-1]**3/3 + x[2:]**2/2
g[-1] = -x[-1]**2/2 + n*x[-1]**3/3
return g
def x0_9(n):
return np.ones([n])
def F_10(x, n):
return np.log(1 + x) - x/n
def x0_10(n):
return np.ones([n])
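# A standalone sketch of running the DF-SANE solver on one of the La Cruz &
# Raydan test problems defined above, outside of the parametrised loop in
# test_performance(); n=1000 corresponds to an entry of table_1.
def _example_dfsane_on_F_1(n=1000):
    sol = root(F_1, x0_1(n), args=(n,), method='DF-SANE')
    return sol  # sol.nit and sol.nfev can be compared against table_1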
|
|
"""
PEP 0484 ( https://www.python.org/dev/peps/pep-0484/ ) describes type hints
through function annotations. There is a strong suggestion in this document
that only the type of type hinting defined in PEP0484 should be allowed
as annotations in future python versions.
The (initial / probably incomplete) implementation todo list for pep-0484:
v Function parameter annotations with builtin/custom type classes
v Function returntype annotations with builtin/custom type classes
v Function parameter annotations with strings (forward reference)
v Function return type annotations with strings (forward reference)
v Local variable type hints
v Assigned types: `Url = str\ndef get(url:Url) -> str:`
v Type hints in `with` statements
x Stub files support
x support `@no_type_check` and `@no_type_check_decorator`
x support for typing.cast() operator
x support for type hint comments for functions, `# type: (int, str) -> int`.
See comment from Guido https://github.com/davidhalter/jedi/issues/662
"""
import itertools
import os
import re
from parso import ParserSyntaxError
from parso.python import tree
from jedi.common import unite
from jedi.evaluate.cache import evaluator_method_cache
from jedi.evaluate import compiled
from jedi.evaluate.context import LazyTreeContext
from jedi import debug
from jedi import _compatibility
from jedi import parser_utils
def _evaluate_for_annotation(context, annotation, index=None):
"""
Evaluates a string-node, looking for an annotation
If index is not None, the annotation is expected to be a tuple
and we're interested in that index
"""
if annotation is not None:
definitions = context.eval_node(
_fix_forward_reference(context, annotation))
if index is not None:
definitions = list(itertools.chain.from_iterable(
definition.py__getitem__(index) for definition in definitions
if definition.array_type == 'tuple' and
len(list(definition.py__iter__())) >= index))
return unite(d.execute_evaluated() for d in definitions)
else:
return set()
def _fix_forward_reference(context, node):
evaled_nodes = context.eval_node(node)
if len(evaled_nodes) != 1:
debug.warning("Eval'ed typing index %s should lead to 1 object, "
" not %s" % (node, evaled_nodes))
return node
evaled_node = list(evaled_nodes)[0]
if isinstance(evaled_node, compiled.CompiledObject) and \
isinstance(evaled_node.obj, str):
try:
new_node = context.evaluator.grammar.parse(
_compatibility.unicode(evaled_node.obj),
start_symbol='eval_input',
error_recovery=False
)
except ParserSyntaxError:
debug.warning('Annotation not parsed: %s' % evaled_node.obj)
return node
else:
module = node.get_root_node()
parser_utils.move(new_node, module.end_pos[0])
new_node.parent = context.tree_node
return new_node
else:
return node
@evaluator_method_cache()
def infer_param(execution_context, param):
annotation = param.annotation
module_context = execution_context.get_root_context()
return _evaluate_for_annotation(module_context, annotation)
def py__annotations__(funcdef):
return_annotation = funcdef.annotation
if return_annotation:
dct = {'return': return_annotation}
else:
dct = {}
for function_param in funcdef.get_params():
param_annotation = function_param.annotation
if param_annotation is not None:
dct[function_param.name.value] = param_annotation
return dct
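# A rough sketch of what ``py__annotations__`` returns for a small annotated
# function, assuming ``parso`` is importable (it is already a dependency of
# this module). The dict values are parso annotation nodes, not evaluated
# types; only annotated parameters appear.
def _example_py__annotations__():
    import parso
    module = parso.parse("def f(a: int, b) -> str:\n    pass\n")
    funcdef = next(module.iter_funcdefs())
    return py__annotations__(funcdef)  # keys: 'return' and 'a'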
@evaluator_method_cache()
def infer_return_types(function_context):
annotation = py__annotations__(function_context.tree_node).get("return", None)
module_context = function_context.get_root_context()
return _evaluate_for_annotation(module_context, annotation)
_typing_module = None
def _get_typing_replacement_module(grammar):
"""
The idea is to return our jedi replacement for the PEP-0484 typing module
as discussed at https://github.com/davidhalter/jedi/issues/663
"""
global _typing_module
if _typing_module is None:
typing_path = \
os.path.abspath(os.path.join(__file__, "../jedi_typing.py"))
with open(typing_path) as f:
code = _compatibility.unicode(f.read())
_typing_module = grammar.parse(code)
return _typing_module
def py__getitem__(context, typ, node):
if not typ.get_root_context().name.string_name == "typing":
return None
# we assume that any class using [] in a module called
# "typing" with a name for which we have a replacement
# should be replaced by that class. This is not 100%
# airtight but I don't have a better idea to check that it's
    # actually the PEP-0484 typing module and not some other module.
if node.type == "subscriptlist":
nodes = node.children[::2] # skip the commas
else:
nodes = [node]
del node
nodes = [_fix_forward_reference(context, node) for node in nodes]
type_name = typ.name.string_name
# hacked in Union and Optional, since it's hard to do nicely in parsed code
if type_name in ("Union", '_Union'):
# In Python 3.6 it's still called typing.Union but it's an instance
# called _Union.
return unite(context.eval_node(node) for node in nodes)
if type_name in ("Optional", '_Optional'):
# Here we have the same issue like in Union. Therefore we also need to
# check for the instance typing._Optional (Python 3.6).
return context.eval_node(nodes[0])
from jedi.evaluate.representation import ModuleContext
typing = ModuleContext(
context.evaluator,
module_node=_get_typing_replacement_module(context.evaluator.latest_grammar),
path=None
)
factories = typing.py__getattribute__("factory")
assert len(factories) == 1
factory = list(factories)[0]
assert factory
function_body_nodes = factory.tree_node.children[4].children
valid_classnames = set(child.name.value
for child in function_body_nodes
if isinstance(child, tree.Class))
if type_name not in valid_classnames:
return None
compiled_classname = compiled.create(context.evaluator, type_name)
from jedi.evaluate.iterable import FakeSequence
args = FakeSequence(
context.evaluator,
"tuple",
[LazyTreeContext(context, n) for n in nodes]
)
result = factory.execute_evaluated(compiled_classname, args)
return result
def find_type_from_comment_hint_for(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[1], name)
def find_type_from_comment_hint_with(context, node, name):
assert len(node.children[1].children) == 3, \
"Can only be here when children[1] is 'foo() as f'"
varlist = node.children[1].children[2]
return _find_type_from_comment_hint(context, node, varlist, name)
def find_type_from_comment_hint_assign(context, node, name):
return _find_type_from_comment_hint(context, node, node.children[0], name)
def _find_type_from_comment_hint(context, node, varlist, name):
index = None
if varlist.type in ("testlist_star_expr", "exprlist", "testlist"):
# something like "a, b = 1, 2"
index = 0
for child in varlist.children:
if child == name:
break
if child.type == "operator":
continue
index += 1
else:
return []
comment = parser_utils.get_following_comment_same_line(node)
if comment is None:
return []
match = re.match(r"^#\s*type:\s*([^#]*)", comment)
if not match:
return []
annotation = tree.String(
repr(str(match.group(1).strip())),
node.start_pos)
annotation.parent = node.parent
return _evaluate_for_annotation(context, annotation, index)
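# Illustrative sketch, not part of jedi: how the ``# type:`` regex used above
# extracts the annotation text from a trailing comment. ``_example_type_comment``
# is a throwaway helper added for this document only.
def _example_type_comment():
    import re
    comment = "# type: List[int]"
    match = re.match(r"^#\s*type:\s*([^#]*)", comment)
    assert match is not None and match.group(1).strip() == "List[int]"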
|
|
import textwrap
from inspect import cleandoc
from jedi._compatibility import literal_eval, is_py3
from parso.python import tree
_EXECUTE_NODES = set([
'funcdef', 'classdef', 'import_from', 'import_name', 'test', 'or_test',
'and_test', 'not_test', 'comparison', 'expr', 'xor_expr', 'and_expr',
'shift_expr', 'arith_expr', 'atom_expr', 'term', 'factor', 'power', 'atom'
])
_FLOW_KEYWORDS = (
'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while'
)
def get_executable_nodes(node, last_added=False):
"""
For static analysis.
"""
result = []
typ = node.type
if typ == 'name':
next_leaf = node.get_next_leaf()
if last_added is False and node.parent.type != 'param' and next_leaf != '=':
result.append(node)
elif typ == 'expr_stmt':
# I think evaluating the statement (and possibly returned arrays),
# should be enough for static analysis.
result.append(node)
for child in node.children:
result += get_executable_nodes(child, last_added=True)
elif typ == 'decorator':
# decorator
if node.children[-2] == ')':
node = node.children[-3]
if node != '(':
result += get_executable_nodes(node)
else:
try:
children = node.children
except AttributeError:
pass
else:
if node.type in _EXECUTE_NODES and not last_added:
result.append(node)
for child in children:
result += get_executable_nodes(child, last_added)
return result
def get_comp_fors(comp_for):
yield comp_for
last = comp_for.children[-1]
while True:
if last.type == 'comp_for':
yield last
elif not last.type == 'comp_if':
break
last = last.children[-1]
def for_stmt_defines_one_name(for_stmt):
"""
Returns True if only one name is returned: ``for x in y``.
Returns False if the for loop is more complicated: ``for x, z in y``.
:returns: bool
"""
return for_stmt.children[1].type == 'name'
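# Illustrative sketch, not part of jedi (assumes the ``parso`` package used by
# jedi is importable): ``for x in y`` stores a plain name at children[1], while
# ``for x, z in y`` stores an exprlist, which is what the helper above checks.
def _example_for_stmt_defines_one_name():
    import parso
    single = parso.parse("for x in y: pass\n").children[0]
    multiple = parso.parse("for x, z in y: pass\n").children[0]
    assert for_stmt_defines_one_name(single) is True
    assert for_stmt_defines_one_name(multiple) is False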
def get_flow_branch_keyword(flow_node, node):
start_pos = node.start_pos
if not (flow_node.start_pos < start_pos <= flow_node.end_pos):
raise ValueError('The node is not part of the flow.')
keyword = None
for i, child in enumerate(flow_node.children):
if start_pos < child.start_pos:
return keyword
first_leaf = child.get_first_leaf()
if first_leaf in _FLOW_KEYWORDS:
keyword = first_leaf
return 0
def get_statement_of_position(node, pos):
for c in node.children:
if c.start_pos <= pos <= c.end_pos:
if c.type not in ('decorated', 'simple_stmt', 'suite') \
and not isinstance(c, (tree.Flow, tree.ClassOrFunc)):
return c
else:
try:
return get_statement_of_position(c, pos)
except AttributeError:
pass # Must be a non-scope
return None
def clean_scope_docstring(scope_node):
""" Returns a cleaned version of the docstring token. """
node = scope_node.get_doc_node()
if node is not None:
# TODO We have to check next leaves until there are no new
# leaves anymore that might be part of the docstring. A
        # docstring can also look like this: ``'foo' 'bar'``.
        # Returns a literal cleaned version of the ``Token``.
cleaned = cleandoc(safe_literal_eval(node.value))
# Since we want the docstr output to be always unicode, just
# force it.
if is_py3 or isinstance(cleaned, unicode):
return cleaned
else:
return unicode(cleaned, 'UTF-8', 'replace')
return ''
def safe_literal_eval(value):
first_two = value[:2].lower()
if first_two[0] == 'f' or first_two in ('fr', 'rf'):
        # literal_eval is not able to resolve f-string literals. We would have
        # to do that manually, but that is currently not implemented.
return ''
try:
return literal_eval(value)
except SyntaxError:
# It's possible to create syntax errors with literals like rb'' in
# Python 2. This should not be possible and in that case just return an
# empty string.
# Before Python 3.3 there was a more strict definition in which order
# you could define literals.
return ''
def get_call_signature(funcdef, width=72, call_string=None):
"""
Generate call signature of this function.
:param width: Fold lines if a line is longer than this value.
:type width: int
    :param call_string: Override function name when given.
:type func_name: str
:rtype: str
"""
# Lambdas have no name.
if call_string is None:
if funcdef.type == 'lambdef':
call_string = '<lambda>'
else:
call_string = funcdef.name.value
if funcdef.type == 'lambdef':
p = '(' + ''.join(param.get_code() for param in funcdef.get_params()).strip() + ')'
else:
p = funcdef.children[2].get_code()
code = call_string + p
return '\n'.join(textwrap.wrap(code, width))
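# Illustrative usage sketch, not part of jedi (assumes ``parso`` is importable):
# parse a tiny function and build its call signature with the helper above.
def _example_get_call_signature():
    import parso
    funcdef = parso.parse("def add(a, b=3):\n    return a + b\n").children[0]
    assert get_call_signature(funcdef) == "add(a, b=3)"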
def get_doc_with_call_signature(scope_node):
"""
Return a document string including call signature.
"""
call_signature = None
if scope_node.type == 'classdef':
for funcdef in scope_node.iter_funcdefs():
if funcdef.name.value == '__init__':
call_signature = \
get_call_signature(funcdef, call_string=scope_node.name.value)
elif scope_node.type in ('funcdef', 'lambdef'):
call_signature = get_call_signature(scope_node)
doc = clean_scope_docstring(scope_node)
if call_signature is None:
return doc
return '%s\n\n%s' % (call_signature, doc)
def move(node, line_offset):
"""
Move the `Node` start_pos.
"""
try:
children = node.children
except AttributeError:
node.line += line_offset
else:
for c in children:
move(c, line_offset)
def get_following_comment_same_line(node):
"""
returns (as string) any comment that appears on the same line,
after the node, including the #
"""
try:
if node.type == 'for_stmt':
whitespace = node.children[5].get_first_leaf().prefix
elif node.type == 'with_stmt':
whitespace = node.children[3].get_first_leaf().prefix
else:
whitespace = node.get_last_leaf().get_next_leaf().prefix
except AttributeError:
return None
except ValueError:
# TODO in some particular cases, the tree doesn't seem to be linked
# correctly
return None
if "#" not in whitespace:
return None
comment = whitespace[whitespace.index("#"):]
if "\r" in comment:
comment = comment[:comment.index("\r")]
if "\n" in comment:
comment = comment[:comment.index("\n")]
return comment
def is_scope(node):
return node.type in ('file_input', 'classdef', 'funcdef', 'lambdef', 'comp_for')
def get_parent_scope(node, include_flows=False):
"""
Returns the underlying scope.
"""
scope = node.parent
while scope is not None:
if include_flows and isinstance(scope, tree.Flow):
return scope
if is_scope(scope):
break
scope = scope.parent
return scope
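# Illustrative sketch, not part of jedi (assumes ``parso`` is importable): a
# funcdef is itself a scope, and its enclosing scope is the module node.
def _example_get_parent_scope():
    import parso
    funcdef = parso.parse("def f():\n    x = 1\n").children[0]
    assert is_scope(funcdef)
    assert get_parent_scope(funcdef).type == 'file_input'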
|
|
from __future__ import unicode_literals
import tempfile
import shutil
import os
from compose import config
from compose.project import Project
from compose.const import LABEL_CONFIG_HASH
from .testcases import DockerClientTestCase
class ProjectTestCase(DockerClientTestCase):
def run_up(self, cfg, **kwargs):
if 'smart_recreate' not in kwargs:
kwargs['smart_recreate'] = True
project = self.make_project(cfg)
project.up(**kwargs)
return set(project.containers(stopped=True))
def make_project(self, cfg):
return Project.from_dicts(
name='composetest',
client=self.client,
service_dicts=config.from_dictionary(cfg),
)
class BasicProjectTest(ProjectTestCase):
def setUp(self):
super(BasicProjectTest, self).setUp()
self.cfg = {
'db': {'image': 'busybox:latest'},
'web': {'image': 'busybox:latest'},
}
def test_no_change(self):
old_containers = self.run_up(self.cfg)
self.assertEqual(len(old_containers), 2)
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
self.assertEqual(old_containers, new_containers)
def test_partial_change(self):
old_containers = self.run_up(self.cfg)
old_db = [c for c in old_containers if c.name_without_project == 'db_1'][0]
old_web = [c for c in old_containers if c.name_without_project == 'web_1'][0]
self.cfg['web']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
preserved = list(old_containers & new_containers)
self.assertEqual(preserved, [old_db])
removed = list(old_containers - new_containers)
self.assertEqual(removed, [old_web])
created = list(new_containers - old_containers)
self.assertEqual(len(created), 1)
self.assertEqual(created[0].name_without_project, 'web_1')
self.assertEqual(created[0].get('Config.Cmd'), ['/bin/true'])
def test_all_change(self):
old_containers = self.run_up(self.cfg)
self.assertEqual(len(old_containers), 2)
self.cfg['web']['command'] = '/bin/true'
self.cfg['db']['command'] = '/bin/true'
new_containers = self.run_up(self.cfg)
self.assertEqual(len(new_containers), 2)
unchanged = old_containers & new_containers
self.assertEqual(len(unchanged), 0)
new = new_containers - old_containers
self.assertEqual(len(new), 2)
class ProjectWithDependenciesTest(ProjectTestCase):
def setUp(self):
super(ProjectWithDependenciesTest, self).setUp()
self.cfg = {
'db': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
},
'web': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'links': ['db'],
},
'nginx': {
'image': 'busybox:latest',
'command': 'tail -f /dev/null',
'links': ['web'],
},
}
def test_up(self):
containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in containers),
set(['db_1', 'web_1', 'nginx_1']),
)
def test_change_leaf(self):
old_containers = self.run_up(self.cfg)
self.cfg['nginx']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['nginx_1']),
)
def test_change_middle(self):
old_containers = self.run_up(self.cfg)
self.cfg['web']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['web_1', 'nginx_1']),
)
def test_change_root(self):
old_containers = self.run_up(self.cfg)
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg)
self.assertEqual(
set(c.name_without_project for c in new_containers - old_containers),
set(['db_1', 'web_1', 'nginx_1']),
)
def test_change_root_no_recreate(self):
old_containers = self.run_up(self.cfg)
self.cfg['db']['environment'] = {'NEW_VAR': '1'}
new_containers = self.run_up(self.cfg, allow_recreate=False)
self.assertEqual(new_containers - old_containers, set())
class ServiceStateTest(DockerClientTestCase):
def test_trigger_create(self):
web = self.create_service('web')
self.assertEqual(('create', []), web.convergence_plan(smart_recreate=True))
def test_trigger_noop(self):
web = self.create_service('web')
container = web.create_container()
web.start()
web = self.create_service('web')
self.assertEqual(('noop', [container]), web.convergence_plan(smart_recreate=True))
def test_trigger_start(self):
options = dict(command=["top"])
web = self.create_service('web', **options)
web.scale(2)
containers = web.containers(stopped=True)
containers[0].stop()
containers[0].inspect()
self.assertEqual([c.is_running for c in containers], [False, True])
web = self.create_service('web', **options)
self.assertEqual(
('start', containers[0:1]),
web.convergence_plan(smart_recreate=True),
)
def test_trigger_recreate_with_config_change(self):
web = self.create_service('web', command=["top"])
container = web.create_container()
web = self.create_service('web', command=["top", "-d", "1"])
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
def test_trigger_recreate_with_image_change(self):
repo = 'composetest_myimage'
tag = 'latest'
image = '{}:{}'.format(repo, tag)
image_id = self.client.images(name='busybox')[0]['Id']
self.client.tag(image_id, repository=repo, tag=tag)
try:
web = self.create_service('web', image=image)
container = web.create_container()
# update the image
c = self.client.create_container(image, ['touch', '/hello.txt'])
self.client.commit(c, repository=repo, tag=tag)
self.client.remove_container(c)
web = self.create_service('web', image=image)
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
finally:
self.client.remove_image(image)
def test_trigger_recreate_with_build(self):
context = tempfile.mkdtemp()
try:
dockerfile = os.path.join(context, 'Dockerfile')
with open(dockerfile, 'w') as f:
f.write('FROM busybox\n')
web = self.create_service('web', build=context)
container = web.create_container()
with open(dockerfile, 'w') as f:
f.write('FROM busybox\nCMD echo hello world\n')
web.build()
web = self.create_service('web', build=context)
self.assertEqual(('recreate', [container]), web.convergence_plan(smart_recreate=True))
finally:
shutil.rmtree(context)
class ConfigHashTest(DockerClientTestCase):
def test_no_config_hash_when_one_off(self):
web = self.create_service('web')
container = web.create_container(one_off=True)
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_no_config_hash_when_overriding_options(self):
web = self.create_service('web')
container = web.create_container(environment={'FOO': '1'})
self.assertNotIn(LABEL_CONFIG_HASH, container.labels)
def test_config_hash_with_custom_labels(self):
web = self.create_service('web', labels={'foo': '1'})
container = web.converge()[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
self.assertIn('foo', container.labels)
def test_config_hash_sticks_around(self):
web = self.create_service('web', command=["top"])
container = web.converge()[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
web = self.create_service('web', command=["top", "-d", "1"])
container = web.converge()[0]
self.assertIn(LABEL_CONFIG_HASH, container.labels)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A simple example of how to evaluate audio tagging systems with the
# sed_eval toolbox.
#
# Author: Toni Heittola ([email protected])
import sed_eval
import dcase_util
# Reference tag activity
reference_tag_list = dcase_util.containers.MetaDataContainer([
{
'filename': 'test1.wav',
'tags': 'cat,dog'
},
{
'filename': 'test2.wav',
'tags': 'dog'
},
{
'filename': 'test3.wav',
'tags': 'bird,cat'
},
{
'filename': 'test4.wav',
'tags': 'cat'
},
{
'filename': 'test5.wav',
'tags': 'bird,speech'
},
{
'filename': 'test6.wav',
'tags': 'dog,speech'
},
{
'filename': 'test7.wav',
'tags': 'speech'
},
])
# Tag probabilities for each tag per file
estimated_tag_probabilities = dcase_util.containers.ProbabilityContainer([
{
'filename': 'test1.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test1.wav',
'label': 'cat',
'probability': 0.99
},
{
'filename': 'test1.wav',
'label': 'dog',
'probability': 0.88
},
{
'filename': 'test1.wav',
'label': 'speech',
'probability': 0.01
},
{
'filename': 'test2.wav',
'label': 'bird',
'probability': 0.1
},
{
'filename': 'test2.wav',
'label': 'cat',
'probability': 0.3
},
{
'filename': 'test2.wav',
'label': 'dog',
'probability': 0.8
},
{
'filename': 'test2.wav',
'label': 'speech',
'probability': 0.1
},
{
'filename': 'test3.wav',
'label': 'bird',
'probability': 0.7
},
{
'filename': 'test3.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test3.wav',
'label': 'dog',
'probability': 0.4
},
{
'filename': 'test3.wav',
'label': 'speech',
'probability': 0.3
},
{
'filename': 'test4.wav',
'label': 'bird',
'probability': 0.323
},
{
'filename': 'test4.wav',
'label': 'cat',
'probability': 0.6
},
{
'filename': 'test4.wav',
'label': 'dog',
'probability': 0.56
},
{
'filename': 'test4.wav',
'label': 'speech',
'probability': 0.4
},
{
'filename': 'test5.wav',
'label': 'bird',
'probability': 0.8
},
{
'filename': 'test5.wav',
'label': 'cat',
'probability': 0.7
},
{
'filename': 'test5.wav',
'label': 'dog',
'probability': 0.45
},
{
'filename': 'test5.wav',
'label': 'speech',
'probability': 0.43
},
{
'filename': 'test6.wav',
'label': 'bird',
'probability': 0.9
},
{
'filename': 'test6.wav',
'label': 'cat',
'probability': 0.53
},
{
'filename': 'test6.wav',
'label': 'dog',
'probability': 0.83
},
{
'filename': 'test6.wav',
'label': 'speech',
'probability': 0.95
},
{
'filename': 'test7.wav',
'label': 'bird',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'cat',
'probability': 0.2
},
{
'filename': 'test7.wav',
'label': 'dog',
'probability': 0.89
},
{
'filename': 'test7.wav',
'label': 'speech',
'probability': 0.45
},
])
# Process estimations and make decisions with 0.5 threshold
estimated_tag_list = dcase_util.containers.MetaDataContainer()
for file in estimated_tag_probabilities.unique_files:
k = estimated_tag_probabilities.filter(filename=file)
tags = []
for item in k:
if item.probability > 0.5:
tags.append(item.label)
estimated_tag_list.append(
{
'filename': file,
'tags': tags
}
)
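# With the 0.5 threshold applied to the probabilities above, the estimated tag
# list works out to (derived by hand from the numbers in this example):
#   test1.wav -> cat, dog          test5.wav -> bird, cat
#   test2.wav -> dog               test6.wav -> bird, cat, dog, speech
#   test3.wav -> bird, cat         test7.wav -> dog
#   test4.wav -> cat, dog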
# Initialize evaluator
tag_evaluator = sed_eval.audio_tag.AudioTaggingMetrics(
tags=reference_tag_list.unique_tags
)
# Evaluate
tag_evaluator.evaluate(
reference_tag_list=reference_tag_list,
estimated_tag_list=estimated_tag_list,
estimated_tag_probabilities=estimated_tag_probabilities
)
# Print all metrics as report
print(tag_evaluator)
# Audio tagging metrics
# ========================================
# Tags : 4
# Evaluated units : 11
#
# Overall metrics (micro-average)
# ======================================
# F-measure
# F-measure (F1) : 72.00 %
# Precision : 64.29 %
# Recall : 81.82 %
# Equal error rate
# Equal error rate (EER) : 18.18 %
#
# Class-wise average metrics (macro-average)
# ======================================
# F-measure
# F-measure (F1) : 70.00 %
# Precision : 71.67 %
# Recall : 83.33 %
# Equal error rate
# Equal error rate (EER) : 17.50 %
#
# Class-wise metrics
# ======================================
# Tag | Nref Nsys | F-score Pre Rec | EER
# ----------------- | --------- --------- | --------- --------- --------- | ---------
# bird | 2 3 | 80.0% 66.7 100.0 | 20.0%
# cat | 3 5 | 75.0% 60.0 100.0 | 25.0%
# dog | 3 5 | 75.0% 60.0 100.0 | 25.0%
# speech | 3 1 | 50.0% 100.0 33.3 | 0.0%
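# Sanity check of the class-wise rows above, e.g. 'bird': Nref=2, Nsys=3 with
# 2 true positives, so precision = 2/3 = 66.7 %, recall = 2/2 = 100.0 % and
# F1 = 2*P*R/(P+R) = 80.0 %, matching the report.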
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import random
import unittest
import uuid
import warnings
import properties
class TestDefault(unittest.TestCase):
def test_random_default(self):
class HasColor(properties.HasProperties):
col = properties.Color('a color', default='random')
hc = HasColor()
assert hc._props['col'].default == 'random'
assert hc.col != 'random'
# 1 in 1.27e130 chance this will pass if hc.col is changing every time
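        # (Assuming 'random' draws from roughly 20 named colours, 20**100 is
        # about 1.27e130, which is where that figure comes from.)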
for _ in range(0, 100):
assert hc.col == hc.col
def test_default_order(self):
class HasIntA(properties.HasProperties):
a = properties.Integer('int a')
hi = HasIntA()
assert hi.a is None
hi.a = 5
del(hi.a)
assert hi.a is None
with self.assertRaises(ValueError):
hi.validate()
class HasIntB(properties.HasProperties):
b = properties.Integer('int b', required=False)
hi = HasIntB()
assert hi.b is None
hi.b = 5
del(hi.b)
assert hi.b is None
assert hi.validate()
class HasIntC(properties.HasProperties):
c = properties.Integer('int c', default=5)
hi = HasIntC()
assert hi.c == 5
hi.c = 10
del(hi.c)
assert hi.c is None
class HasIntClassDef(HasIntC):
_defaults = {'c': 100}
hi = HasIntClassDef()
assert hi.c == 100
hi.c = 10
del(hi.c)
assert hi.c is None
with self.assertRaises(AttributeError):
class HasIntCError(HasIntC):
_defaults = {'z': 100}
HasIntCError()
class HasIntD(properties.HasProperties):
d = properties.Integer('int d', default=5, required=False)
hi = HasIntD()
assert hi.d == 5
hi.d = 10
del(hi.d)
assert hi.d is None
class NewDefInt(properties.Integer):
_class_default = 1000
class HasIntE(properties.HasProperties):
e = NewDefInt('int e')
hi = HasIntE()
assert hi.e == 1000
hi.e = 10
del(hi.e)
assert hi.e is None
class HasIntF(properties.HasProperties):
f = NewDefInt('int e', default=5)
hi = HasIntF()
assert hi.f == 5
hi.f = 10
del(hi.f)
assert hi.f is None
class HasIntFandG(HasIntF):
_defaults = {'f': 20, 'g': 25}
g = properties.Integer('int g', default=10)
hi = HasIntFandG()
assert hi.f == 20
assert hi.g == 25
hi = HasIntFandG(g=12)
assert hi.f == 20
assert hi.g == 12
class HasIntFGH(HasIntFandG):
h = NewDefInt('int h')
_defaults = dict(f=30)
hi = HasIntFGH()
assert hi.f == 30
assert hi.g == 25
assert hi.h == 1000
with self.assertRaises(AttributeError):
class BadDefault(HasIntFGH):
_defaults = dict(f='hi')
class HasIntFGHDefs(HasIntFGH):
_defaults = dict(h=-10)
hi = HasIntFGHDefs()
assert hi.f == 30
assert hi.g == 25
assert hi.h == -10
def test_union_default(self):
class HasUnionA(properties.HasProperties):
a = properties.Union('union', (properties.Integer(''),
properties.String('')))
hu = HasUnionA()
assert hu.a is None
hu.a = 5
hu.a = 'hi'
del(hu.a)
assert hu.a is None
class HasUnionB(properties.HasProperties):
b = properties.Union('union', (properties.Integer('', default=5),
properties.String('')))
hu = HasUnionB()
assert hu.b == 5
hu.b = 'hi'
del(hu.b)
assert hu.b is None
class HasUnionC(properties.HasProperties):
c = properties.Union('union', (
properties.Integer(''),
properties.String('', default='hi'),
properties.Integer(''))
)
hu = HasUnionC()
assert hu.c == 'hi'
hu.c = 5
del(hu.c)
assert hu.c is None
class HasUnionD(properties.HasProperties):
d = properties.Union('union', (
properties.Integer(''),
properties.String(''),
properties.Integer('')
), default=100)
hu = HasUnionD()
assert hu.d == 100
hu.d = 5
del(hu.d)
assert hu.d is None
with self.assertRaises(TypeError):
properties.Union(
'union',
(properties.Integer(''), properties.Bool('')),
default=0.5
)
with warnings.catch_warnings(record=True) as w:
properties.Union('union', (properties.Integer('', default=5),
properties.Bool('', default=True)))
assert len(w) == 1
assert issubclass(w[0].category, RuntimeWarning)
with warnings.catch_warnings(record=True) as w:
properties.Union('union', (properties.Integer('', default=5),
properties.Bool('', default=True)),
default=False)
assert len(w) > 0
assert issubclass(w[0].category, RuntimeWarning)
def twelve():
return 12
HasUnionD._props['d'].default = twelve
hu = HasUnionD()
assert hu.d == 12
HasUnionD._props['d'].default = properties.undefined
hu = HasUnionD()
assert hu.d is None
def test_instance_default(self):
class HasInt(properties.HasProperties):
a = properties.Integer('int a')
class HasInstance(properties.HasProperties):
inst = properties.Instance('has int instance', HasInt,
auto_create=True)
hi0 = HasInstance()
hi1 = HasInstance()
assert isinstance(hi0.inst, HasInt)
assert isinstance(hi1.inst, HasInt)
assert hi0.inst is not hi1.inst
hi0.inst.a = 5
assert hi1.inst.a is None
del hi0.inst
assert hi0.inst is None
class HasIntSubclass(HasInt):
pass
class HasInstanceSubclass(HasInstance):
_defaults = {'inst': HasIntSubclass}
hi2 = HasInstanceSubclass()
assert isinstance(hi2.inst, HasIntSubclass)
class HasList(properties.HasProperties):
z = properties.List('z list', HasInstance, default=list)
hl0 = HasList()
hl1 = HasList()
assert isinstance(hl0.z, list)
assert isinstance(hl1.z, list)
assert hl0.z is not hl1.z
def test_list_default(self):
class ListDefault(properties.List):
_class_default = list
class HasIntList(properties.HasProperties):
intlist = ListDefault('list of ints', properties.Integer(''))
hil = HasIntList()
assert isinstance(hil.intlist, list)
assert len(hil.intlist) == 0
assert hil.intlist is not HasIntList().intlist
with warnings.catch_warnings(record=True) as w:
properties.List('list', properties.Integer('', default=5))
assert len(w) == 1
assert issubclass(w[0].category, RuntimeWarning)
class HasDefaultIntList(properties.HasProperties):
intlist = properties.List('list of ints', properties.Integer('', default=5))
hdil = HasDefaultIntList()
assert hdil.intlist is None
def test_reset(self):
class HasInts(properties.HasProperties):
_defaults = {'b': 10}
a = properties.Integer('int a', default=1)
b = properties.Integer('int b')
@properties.observer('a')
def _set_b_to_five(self, change):
self.b = 5
hi = HasInts()
assert hi.a == 1
assert hi.b == 10
del hi.a
assert hi.a is None
assert hi.b == 5
hi._reset('b')
assert hi.b == 10
with properties.listeners_disabled():
hi._reset('a')
assert hi.a == 1
assert hi.b == 10
with self.assertRaises(AttributeError):
hi._reset('c')
class HasUid(properties.HasProperties):
uid = properties.Uuid('uid')
hu = HasUid()
with self.assertRaises(AttributeError):
hu._reset('uid')
def test_callable(self):
class HasUid(properties.HasProperties):
uid = properties.Uuid('uid')
class HasUidZero(HasUid):
_defaults = {'uid': lambda: uuid.UUID(int=0)}
huz = HasUidZero()
assert (properties.Uuid.to_json(huz.uid) ==
'00000000-0000-0000-0000-000000000000')
NUMBER = 1
def generate_int():
return NUMBER
class HasInt(properties.HasProperties):
a = properties.Integer('an int', default=generate_int)
hi = HasInt()
assert hi.a == 1
NUMBER = 2
hi._reset('a')
assert hi.a == 2
class HasNewInt(HasInt):
_defaults = {'a': lambda: generate_int()+1}
hi = HasNewInt()
assert hi.a == 3
def test_default_validation(self):
class HasMatrix(properties.HasProperties):
matrix = properties.Array(
'2x2 matrix',
shape=(2, 2),
default=lambda: [[1., 2.], [3., 4.]],
)
def test_default_override_dynamic(self):
class HasIntABase(properties.HasProperties):
a = properties.Integer(
'int a',
default=5,
)
class HasIntASub(HasIntABase):
@properties.Integer('int a')
def a(self):
return 10
class HasIntBBase(properties.HasProperties):
b = properties.Integer(
'int b',
)
_defaults = {'b': 2}
class HasIntBSub(HasIntBBase):
@properties.Integer('int b')
def b(self):
return 4
hi = HasIntABase()
assert hi.a == 5
del(hi.a)
assert hi.a is None
with self.assertRaises(ValueError):
hi.validate()
hid = HasIntASub()
assert hid.a == 10
with self.assertRaises(AttributeError):
del hid.a
hid.validate()
hi = HasIntBBase()
assert hi.b == 2
del(hi.b)
assert hi.b is None
with self.assertRaises(ValueError):
hi.validate()
hid = HasIntBSub()
assert hid.b == 4
with self.assertRaises(AttributeError):
del hid.b
hid.validate()
if __name__ == '__main__':
unittest.main()
|
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Rally command: task """
from __future__ import print_function
import json
import os
import sys
import webbrowser
import jsonschema
from oslo_utils import uuidutils
import yaml
from rally import api
from rally.cli import cliutils
from rally.cli import envutils
from rally.common import db
from rally.common import fileutils
from rally.common.i18n import _
from rally.common import junit
from rally.common import log as logging
from rally.common import objects
from rally.common import utils as rutils
from rally import consts
from rally import exceptions
from rally import plugins
from rally.task.processing import plot
from rally.task.processing import utils
class FailedToLoadTask(exceptions.RallyException):
msg_fmt = _("Failed to load task")
class TaskCommands(object):
"""Task management.
Set of commands that allow you to manage benchmarking tasks and results.
"""
def _load_task(self, task_file, task_args=None, task_args_file=None):
"""Load tasks template from file and render it with passed args.
:param task_file: Path to file with input task
:param task_args: JSON or YAML representation of dict with args that
will be used to render input task with jinja2
        :param task_args_file: Path to file with JSON or YAML representation
                               of a dict that will be used to render the input
                               task with jinja2. If both task_args and
                               task_args_file are specified, they are merged;
                               task_args has higher priority and overrides
                               values from task_args_file.
        :returns: Parsed (dict) representation of the loaded and rendered task
"""
print(cliutils.make_header("Preparing input task"))
def print_invalid_header(source_name, args):
print(_("Invalid %(source)s passed: \n\n %(args)s \n")
% {"source": source_name, "args": args},
file=sys.stderr)
def parse_task_args(src_name, args):
try:
kw = args and yaml.safe_load(args)
kw = {} if kw is None else kw
except yaml.parser.ParserError as e:
print_invalid_header(src_name, args)
print(_("%(source)s has to be YAML or JSON. Details:"
"\n\n%(err)s\n")
% {"source": src_name, "err": e},
file=sys.stderr)
raise TypeError()
if not isinstance(kw, dict):
print_invalid_header(src_name, args)
print(_("%(src)s has to be dict, actually %(src_type)s\n")
% {"src": src_name, "src_type": type(kw)},
file=sys.stderr)
raise TypeError()
return kw
try:
kw = {}
if task_args_file:
with open(task_args_file) as f:
kw.update(parse_task_args("task_args_file", f.read()))
kw.update(parse_task_args("task_args", task_args))
except TypeError:
raise FailedToLoadTask()
with open(task_file) as f:
try:
input_task = f.read()
task_dir = os.path.expanduser(
os.path.dirname(task_file)) or "./"
rendered_task = api.Task.render_template(input_task,
task_dir, **kw)
except Exception as e:
print(_("Failed to render task template:\n%(task)s\n%(err)s\n")
% {"task": input_task, "err": e},
file=sys.stderr)
raise FailedToLoadTask()
print(_("Input task is:\n%s\n") % rendered_task)
try:
return yaml.safe_load(rendered_task)
except Exception as e:
print(_("Wrong format of rendered input task. It should be "
"YAML or JSON.\n%s") % e,
file=sys.stderr)
raise FailedToLoadTask()
@cliutils.args("--deployment", type=str, dest="deployment",
required=False, help="UUID or name of the deployment")
@cliutils.args("--task", "--filename",
help="Path to the file with full configuration of task")
@cliutils.args("--task-args", dest="task_args",
                   help="Input task args (dict in JSON). These args are used "
                        "to render the input task, which is a jinja2 template.")
@cliutils.args("--task-args-file", dest="task_args_file",
                   help="Path to the file with input task args (dict in "
                        "JSON/YAML). These args are used to render the input "
                        "task, which is a jinja2 template.")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def validate(self, task, deployment=None, task_args=None,
task_args_file=None):
"""Validate a task configuration file.
This will check that task configuration file has valid syntax and
all required options of scenarios, contexts, SLA and runners are set.
:param task: a file with yaml/json task
        :param task_args: Input task args (dict in JSON/YAML). These args are
                          used to render the input task, which is a jinja2
                          template.
        :param task_args_file: File with input task args (dict in JSON/YAML).
                               These args are used to render the input task,
                               which is a jinja2 template.
:param deployment: UUID or name of a deployment
"""
try:
input_task = self._load_task(task, task_args, task_args_file)
except FailedToLoadTask:
return(1)
try:
api.Task.validate(deployment, input_task)
print("Task config is valid :)")
except exceptions.InvalidTaskException as e:
print("Task config is invalid: \n")
print(e)
return(1)
@cliutils.args("--deployment", type=str, dest="deployment",
required=False, help="UUID or name of the deployment")
@cliutils.args("--task", "--filename", help="Path to the input task file")
@cliutils.args("--task-args", dest="task_args",
                   help="Input task args (dict in JSON). These args are used "
                        "to render the input task, which is a jinja2 template.")
@cliutils.args("--task-args-file", dest="task_args_file",
                   help="Path to the file with input task args (dict in "
                        "JSON/YAML). These args are used to render the input "
                        "task, which is a jinja2 template.")
@cliutils.args("--tag", help="Tag for this task")
@cliutils.args("--no-use", action="store_false", dest="do_use",
help="Don't set new task as default for future operations")
@cliutils.args("--abort-on-sla-failure", action="store_true",
dest="abort_on_sla_failure",
                   help="Abort the execution of a benchmark scenario when "
                        "any SLA check for it fails")
@envutils.with_default_deployment(cli_arg_name="deployment")
@plugins.ensure_plugins_are_loaded
def start(self, task, deployment=None, task_args=None, task_args_file=None,
tag=None, do_use=False, abort_on_sla_failure=False):
"""Start benchmark task.
:param task: a file with yaml/json task
        :param task_args: Input task args (dict in JSON/YAML). These args are
                          used to render the input task, which is a jinja2
                          template.
        :param task_args_file: File with input task args (dict in JSON/YAML).
                               These args are used to render the input task,
                               which is a jinja2 template.
:param deployment: UUID or name of a deployment
:param tag: optional tag for this task
:param do_use: if True, the new task will be stored as the default one
for future operations
:param abort_on_sla_failure: if True, the execution of a benchmark
scenario will stop when any SLA check
for it fails
"""
try:
input_task = self._load_task(task, task_args, task_args_file)
except FailedToLoadTask:
return(1)
try:
task = api.Task.create(deployment, tag)
print(cliutils.make_header(
_("Task %(tag)s %(uuid)s: started")
% {"uuid": task["uuid"], "tag": task["tag"]}))
print("Benchmarking... This can take a while...\n")
print("To track task status use:\n")
print("\trally task status\n\tor\n\trally task detailed\n")
if do_use:
self.use(task["uuid"])
api.Task.start(deployment, input_task, task=task,
abort_on_sla_failure=abort_on_sla_failure)
self.detailed(task_id=task["uuid"])
except exceptions.InvalidConfigException:
return(1)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
@envutils.with_default_task_id
@cliutils.args("--soft", action="store_true",
help="Abort task after current scenario full execution")
def abort(self, task_id=None, soft=False):
"""Abort started benchmarking task.
:param task_id: Task uuid
:param soft: if set to True, task should be aborted after execution of
current scenario
"""
if soft:
            print("INFO: a soft abort won't stop the currently running "
                  "scenario; it only prevents new scenarios from starting. "
                  "If the task contains a single scenario, a soft abort "
                  "will not help at all.")
api.Task.abort(task_id, soft, async=False)
print("Task %s successfully stopped." % task_id)
@cliutils.args("--uuid", type=str, dest="task_id", help="UUID of task")
@envutils.with_default_task_id
def status(self, task_id=None):
"""Display current status of task.
:param task_id: Task uuid
Returns current status of task
"""
task = db.task_get(task_id)
print(_("Task %(task_id)s: %(status)s")
% {"task_id": task_id, "status": task["status"]})
@cliutils.args("--uuid", type=str, dest="task_id",
help=("uuid of task, if --uuid is \"last\" results of most "
"recently created task will be displayed."))
@cliutils.args("--iterations-data", dest="iterations_data",
action="store_true",
help="print detailed results for each iteration")
@envutils.with_default_task_id
def detailed(self, task_id=None, iterations_data=False):
"""Display results table.
:param task_id: Task uuid
:param iterations_data: print detailed results for each iteration
Prints detailed information of task.
"""
def _print_iterations_data(raw_data):
headers = ["iteration", "full duration"]
float_cols = ["full duration"]
atomic_actions = []
for row in raw_data:
# find first non-error result to get atomic actions names
if not row["error"] and "atomic_actions" in row:
atomic_actions = row["atomic_actions"].keys()
for row in raw_data:
if row["atomic_actions"]:
for (c, a) in enumerate(atomic_actions, 1):
action = "%(no)i. %(action)s" % {"no": c, "action": a}
headers.append(action)
float_cols.append(action)
break
table_rows = []
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
for (c, r) in enumerate(raw_data, 1):
dlist = [c]
dlist.append(r["duration"])
if r["atomic_actions"]:
for action in atomic_actions:
dlist.append(r["atomic_actions"].get(action) or 0)
table_rows.append(rutils.Struct(**dict(zip(headers, dlist))))
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters)
print()
task = db.task_get_detailed(task_id)
if task is None:
print("The task %s can not be found" % task_id)
return(1)
print()
print("-" * 80)
print(_("Task %(task_id)s: %(status)s")
% {"task_id": task_id, "status": task["status"]})
if task["status"] == consts.TaskStatus.FAILED:
print("-" * 80)
verification = yaml.safe_load(task["verification_log"])
if not logging.is_debug():
print(verification[0])
print(verification[1])
print()
print(_("For more details run:\nrally -vd task detailed %s")
% task["uuid"])
else:
print(yaml.safe_load(verification[2]))
return
for result in task["results"]:
key = result["key"]
print("-" * 80)
print()
print("test scenario %s" % key["name"])
print("args position %s" % key["pos"])
print("args values:")
print(json.dumps(key["kw"], indent=2))
raw = result["data"]["raw"]
table_cols = ["action", "min", "median",
"90%ile", "95%ile", "max",
"avg", "success", "count"]
float_cols = ["min", "median",
"90%ile", "95%ile", "max",
"avg"]
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
table_rows = []
actions_data = utils.get_atomic_actions_data(raw)
for action in actions_data:
durations = actions_data[action]
if durations:
data = [action,
round(min(durations), 3),
round(utils.median(durations), 3),
round(utils.percentile(durations, 0.90), 3),
round(utils.percentile(durations, 0.95), 3),
round(max(durations), 3),
round(utils.mean(durations), 3),
"%.1f%%" % (len(durations) * 100.0 / len(raw)),
len(raw)]
else:
data = [action, None, None, None, None, None, None,
"0.0%", len(raw)]
table_rows.append(rutils.Struct(**dict(zip(table_cols, data))))
cliutils.print_list(table_rows, fields=table_cols,
formatters=formatters,
table_label="Response Times (sec)",
sortby_index=None)
if iterations_data:
_print_iterations_data(raw)
print(_("Load duration: %s") % result["data"]["load_duration"])
print(_("Full duration: %s") % result["data"]["full_duration"])
# NOTE(hughsaunders): ssrs=scenario specific results
ssrs = []
for result in raw:
data = result["scenario_output"].get("data")
if data:
ssrs.append(data)
if ssrs:
keys = set()
for ssr in ssrs:
keys.update(ssr.keys())
headers = ["key", "min", "median",
"90%ile", "95%ile", "max",
"avg"]
float_cols = ["min", "median", "90%ile",
"95%ile", "max", "avg"]
formatters = dict(zip(float_cols,
[cliutils.pretty_float_formatter(col, 3)
for col in float_cols]))
table_rows = []
for key in keys:
values = [float(ssr[key]) for ssr in ssrs if key in ssr]
if values:
row = [str(key),
round(min(values), 3),
round(utils.median(values), 3),
round(utils.percentile(values, 0.90), 3),
round(utils.percentile(values, 0.95), 3),
round(max(values), 3),
round(utils.mean(values), 3)]
else:
row = [str(key)] + ["n/a"] * 6
table_rows.append(rutils.Struct(**dict(zip(headers, row))))
print("\nScenario Specific Results\n")
cliutils.print_list(table_rows,
fields=headers,
formatters=formatters,
table_label="Response Times (sec)")
for result in raw:
errors = result["scenario_output"].get("errors")
if errors:
print(errors)
print()
print("HINTS:")
print(_("* To plot HTML graphics with this data, run:"))
print("\trally task report %s --out output.html" % task["uuid"])
print()
print(_("* To generate a JUnit report, run:"))
print("\trally task report %s --junit --out output.xml" %
task["uuid"])
print()
print(_("* To get raw JSON output of task results, run:"))
print("\trally task results %s\n" % task["uuid"])
@cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
@envutils.with_default_task_id
@cliutils.suppress_warnings
def results(self, task_id=None):
"""Display raw task results.
This will produce a lot of output data about every iteration.
:param task_id: Task uuid
"""
results = [{"key": x["key"], "result": x["data"]["raw"],
"sla": x["data"]["sla"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]}
for x in objects.Task.get(task_id).get_results()]
if results:
print(json.dumps(results, sort_keys=True, indent=4))
else:
print(_("The task %s is still running, results will become "
"available when it is finished.") % task_id)
return(1)
@cliutils.args("--deployment", type=str, dest="deployment",
                   help="List tasks from the specified deployment. "
                        "By default, tasks are listed from the active deployment.")
@cliutils.args("--all-deployments", action="store_true",
dest="all_deployments",
help="List tasks from all deployments.")
@cliutils.args("--status", type=str, dest="status",
help="List tasks with specified status."
" Available statuses: %s" % ", ".join(consts.TaskStatus))
@cliutils.args("--uuids-only", action="store_true",
dest="uuids_only", help="List task UUIDs only")
@envutils.with_default_deployment(cli_arg_name="deployment")
def list(self, deployment=None, all_deployments=False, status=None,
uuids_only=False):
"""List tasks, started and finished.
        Displayed tasks can be filtered by status or deployment.
        By default 'rally task list' displays tasks from the active deployment
        without filtering by status.
:param deployment: UUID or name of deployment
:param status: task status to filter by.
Available task statuses are in rally.consts.TaskStatus
:param all_deployments: display tasks from all deployments
:param uuids_only: list task UUIDs only
"""
filters = {}
headers = ["uuid", "deployment_name", "created_at", "duration",
"status", "tag"]
if status in consts.TaskStatus:
filters.setdefault("status", status)
elif status:
print(_("Error: Invalid task status '%s'.\n"
"Available statuses: %s") % (
status, ", ".join(consts.TaskStatus)),
file=sys.stderr)
return(1)
if not all_deployments:
filters.setdefault("deployment", deployment)
task_list = [task.to_dict() for task in objects.Task.list(**filters)]
for x in task_list:
x["duration"] = x["updated_at"] - x["created_at"]
if uuids_only:
if task_list:
cliutils.print_list(task_list, ["uuid"],
print_header=False,
print_border=False)
elif task_list:
cliutils.print_list(
task_list,
headers, sortby_index=headers.index("created_at"))
else:
if status:
print(_("There are no tasks in '%s' status. "
"To run a new task, use:\n"
"\trally task start") % status)
else:
print(_("There are no tasks. To run a new task, use:\n"
"\trally task start"))
@cliutils.args("--tasks", dest="tasks", nargs="+",
help="uuids of tasks or json files with task results")
@cliutils.args("--out", type=str, dest="out", required=True,
help="Path to output file.")
@cliutils.args("--open", dest="open_it", action="store_true",
help="Open it in browser.")
@cliutils.args("--html", dest="out_format",
action="store_const", const="html",
help="Generate the report in HTML.")
@cliutils.args("--junit", dest="out_format",
action="store_const", const="junit",
help="Generate the report in the JUnit format.")
@envutils.default_from_global("tasks", envutils.ENV_TASK, "--uuid")
@cliutils.suppress_warnings
def report(self, tasks=None, out=None, open_it=False, out_format="html"):
"""Generate report file for specified task.
        :param tasks: list, UUIDs of tasks or paths to JSON files with task results
:param out: str, output file name
:param open_it: bool, whether to open output file in web browser
:param out_format: output format (junit or html)
"""
tasks = isinstance(tasks, list) and tasks or [tasks]
results = []
message = []
processed_names = {}
for task_file_or_uuid in tasks:
if os.path.exists(os.path.expanduser(task_file_or_uuid)):
with open(os.path.expanduser(task_file_or_uuid),
"r") as inp_js:
tasks_results = json.load(inp_js)
for result in tasks_results:
try:
jsonschema.validate(
result,
objects.task.TASK_RESULT_SCHEMA)
except jsonschema.ValidationError as e:
print(_("ERROR: Invalid task result format in %s")
% task_file_or_uuid, file=sys.stderr)
if logging.is_debug():
print(e, file=sys.stderr)
else:
print(e.message, file=sys.stderr)
return 1
elif uuidutils.is_uuid_like(task_file_or_uuid):
tasks_results = map(
lambda x: {"key": x["key"],
"sla": x["data"]["sla"],
"result": x["data"]["raw"],
"load_duration": x["data"]["load_duration"],
"full_duration": x["data"]["full_duration"]},
objects.Task.get(task_file_or_uuid).get_results())
else:
print(_("ERROR: Invalid UUID or file name passed: %s"
) % task_file_or_uuid,
file=sys.stderr)
return 1
for task_result in tasks_results:
if task_result["key"]["name"] in processed_names:
processed_names[task_result["key"]["name"]] += 1
task_result["key"]["pos"] = processed_names[
task_result["key"]["name"]]
else:
processed_names[task_result["key"]["name"]] = 0
results.append(task_result)
output_file = os.path.expanduser(out)
if out_format == "html":
with open(output_file, "w+") as f:
f.write(plot.plot(results))
if open_it:
webbrowser.open_new_tab("file://" + os.path.realpath(out))
elif out_format == "junit":
test_suite = junit.JUnit("Rally test suite")
for result in results:
if isinstance(result["sla"], list):
message = ",".join([sla["detail"] for sla in
result["sla"] if not sla["success"]])
if message:
outcome = junit.JUnit.FAILURE
else:
outcome = junit.JUnit.SUCCESS
test_suite.add_test(result["key"]["name"],
result["full_duration"], outcome, message)
with open(output_file, "w+") as f:
f.write(test_suite.to_xml())
else:
print(_("Invalid output format: %s") % out_format,
file=sys.stderr)
return 1
@cliutils.args("--force", action="store_true", help="force delete")
@cliutils.args("--uuid", type=str, dest="task_id", nargs="*",
metavar="TASK_ID",
help="uuid of task or a list of task uuids")
@envutils.with_default_task_id
def delete(self, task_id=None, force=False):
"""Delete task and its results.
:param task_id: Task uuid or a list of task uuids
:param force: Force delete or not
"""
def _delete_single_task(tid, force):
try:
api.Task.delete(tid, force=force)
print("Successfully deleted task `%s`" % tid)
except exceptions.TaskInvalidStatus as e:
print(e)
                print("Use the '--force' option to delete a task regardless "
                      "of its state.")
if isinstance(task_id, list):
for tid in task_id:
_delete_single_task(tid, force)
else:
_delete_single_task(task_id, force)
@cliutils.args("--uuid", type=str, dest="task_id", help="uuid of task")
@cliutils.args("--json", dest="tojson",
action="store_true",
help="output in json format")
@envutils.with_default_task_id
def sla_check(self, task_id=None, tojson=False):
"""Display SLA check results table.
:param task_id: Task uuid.
:returns: Number of failed criteria.
"""
results = objects.Task.get(task_id).get_results()
failed_criteria = 0
data = []
STATUS_PASS = "PASS"
STATUS_FAIL = "FAIL"
for result in results:
key = result["key"]
for sla in sorted(result["data"]["sla"],
key=lambda x: x["criterion"]):
success = sla.pop("success")
sla["status"] = success and STATUS_PASS or STATUS_FAIL
sla["benchmark"] = key["name"]
sla["pos"] = key["pos"]
failed_criteria += int(not success)
data.append(sla if tojson else rutils.Struct(**sla))
if tojson:
print(json.dumps(data, sort_keys=False))
else:
cliutils.print_list(data, ("benchmark", "pos", "criterion",
"status", "detail"))
return failed_criteria
@cliutils.args("--task", type=str, dest="task", required=False,
help="UUID of the task")
def use(self, task):
"""Set active task.
:param task: Task uuid.
"""
print("Using task: %s" % task)
db.task_get(task)
fileutils.update_globals_file("RALLY_TASK", task)
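# Example CLI usage of the commands defined above (file names and task args are
# placeholders, not values shipped with Rally):
#   rally task validate --task my_task.yaml --task-args '{"image_name": "cirros"}'
#   rally task start --task my_task.yaml --tag nightly
#   rally task report --tasks <task-uuid> --out report.html --open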
|
|
#!/usr/bin/env python
"""
Artificial Intelligence for Humans
Volume 3: Deep Learning and Neural Networks
Python Version
http://www.aifh.org
http://www.jeffheaton.com
Code repository:
https://github.com/jeffheaton/aifh
Copyright 2015 by Jeff Heaton
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
For more information on Heaton Research copyrights, licenses
and trademarks visit:
http://www.heatonresearch.com/copyright
"""
__author__ = 'jheaton'
import csv
import sys
import numpy as np
from aifh_error import AIFHError
from equilateral import Equilateral
class Normalize(object):
    """ This class is used to handle both normalization and denormalization. The data is typically loaded in from a CSV
    file. Methods are provided to also normalize and denormalize individual numbers.
"""
def __init__(self):
""" Setup the normalize class.
"""
self.header = []
self.column_map = {}
def load_csv(self, filename):
""" Load a CSV file. The CSV file is assumed to have column headers as the first row. The headers will be read,
and can be used to reference individual columns. The columns can also be referenced by index.
"""
result = []
first = True
with open(filename, 'rt') as f:
reader = csv.reader(f)
for row in reader:
if len(row) > 0:
if first:
first = False
self.header = row
else:
result.append(row)
for idx in range(0, len(self.header)):
self.column_map[self.header[idx]] = idx
return result
@staticmethod
def display_data(data_set):
""" Display a 2D data set to the console.
"""
for row in data_set:
print(row)
def max(self, data_set, col):
""" Obtain the maximum numeric value for the specified column.
Note: this will not convert text values to numeric, see make_numeric for that.
"""
col = self.resolve_column(col)
        # Start from the lowest representable float so all-negative columns work too.
        result = -sys.float_info.max
for row in data_set:
result = max(result, row[col])
return result
def min(self, data_set, col):
""" Obtain the minimum numeric value for the specified column.
Note: this will not convert text values to numeric, see make_numeric for that.
"""
col = self.resolve_column(col)
result = sys.float_info.max
for row in data_set:
result = min(result, row[col])
return result
def make_col_numeric(self, data_set, col):
""" Make the specified column numeric. If non-numeric values exist in this column, an error will result.
"""
col = self.resolve_column(col)
for row in data_set:
row[col] = float(row[col])
def norm_col_range(self, data_set, col, normalized_low, normalized_high):
""" Perform range normalization on the specified column. The min/max will be calculated for the column and
        all values will be normalized to the requested range.
"""
col = self.resolve_column(col)
# Obtain the high and low values for the column.
data_low = self.min(data_set, col)
data_high = self.max(data_set, col)
# Iterate over all rows and perform the normalization.
for row in data_set:
row[col] = ((row[col] - data_low) / (data_high - data_low)) \
* (normalized_high - normalized_low) + normalized_low
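        # Worked example: with data_low=0, data_high=10 and a requested range of
        # [-1, 1], the value 2.5 maps to ((2.5 - 0) / (10 - 0)) * (1 - -1) + -1 = -0.5.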
def build_class_map(self, data_set, col):
""" Build a class map. Return a dictionary that contains a mapping between each unique class in the specified
column and that class's assigned index. This is used to perform both one-of-n and equilateral encoding.
"""
col = self.resolve_column(col)
result = {}
index = 0
for row in data_set:
key = row[col]
if key not in result:
result[key] = index
index += 1
return result
def norm_col_one_of_n(self, data_set, col, classes, normalized_low, normalized_high):
""" Normalize a column using one-of-n. The classes parameter contains a map of the unique items in the
specified column. Typically this value is obtained by calling build_class_map.
"""
col = self.resolve_column(col)
for row in data_set:
key = row[col]
value = classes[key]
row.pop(col)
for i in range(0, len(classes)):
if i == value:
row.insert(col + i, normalized_high)
else:
row.insert(col + i, normalized_low)
def denorm_one_of_n(self, data):
""" Denormalize a single value, using one-of-n.
@param data: The data to denormalize.
@return: The index of the highest value
"""
max_value = 0
max_index = -1
for i in range(0, len(data)):
if max_index == -1 or data[i] > max_value:
max_value = data[i]
max_index = i
return max_index
def norm_col_equilateral(self, data_set, col, classes, normalized_low, normalized_high):
""" Normalize a column using equilateral. The classes parameter contains a map of the unique items in the
specified column. Typically this value is obtained by calling build_class_map.
"""
col = self.resolve_column(col)
eq = Equilateral(len(classes), normalized_low, normalized_high)
for row in data_set:
key = row[col]
value = classes[key]
row.pop(col)
vec = eq.encode(value)
for i in range(0, len(vec)):
row.insert(col + i, vec[i])
def resolve_column(self, col):
""" Resolve a column to an index. If the value is numeric then this value will be checked to make sure it is
a valid column index. If the column index is invalid, an error will occur. If the column is text, then
we check to see if it matches one of the headers. If no match can be found, then an error results.
"""
if type(col) is int:
# Handle an integer index.
if col < 0 or col >= len(self.column_map):
raise AIFHError("Column index out of range: " + str(col))
return col
else:
# Handle a string column name.
if col not in self.column_map:
raise AIFHError("Undefined column: " + col)
else:
return self.column_map[col]
def col_extract(self, data_set, col):
result = []
col = self.resolve_column(col)
for row in data_set:
result.append(row[col])
return result
def delete_unknowns(self, data_set):
""" Delete unknown data, any row that has one or more ? columns.
@param data_set: The data set.
"""
        i = 0
        while i < len(data_set):
            row = data_set[i]
            if "?" in row:
                # Do not advance the index after a delete: the next row has
                # shifted into position i and must be checked as well.
                del data_set[i]
            else:
                i += 1
def col_delete(self, data_set, col):
""" Delete the specified column.
@param data_set: The data set to delete from.
@param col: The column to delete.
"""
col = self.resolve_column(col)
for row in data_set:
del row[col]
def col_replace(self, data_set, col, search_for, replace_with, others):
""" Replace values in the specified column.
@param data_set: The data set.
@param col: The column to replace.
@param search_for: The value we seek to replace.
@param replace_with: What to replace the specified value with.
@param others: What to set other values to.
"""
for row in data_set:
d = float(row[col])
if np.abs(d - search_for) < 0.0001:
row[col] = float(replace_with)
else:
row[col] = float(others)
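# Minimal usage sketch, not part of the original module. It assumes a CSV file
# named "iris.csv" with a header row, four numeric feature columns and a class
# label in the fifth column; the file name and layout are placeholders.
if __name__ == "__main__":
    norm = Normalize()
    data = norm.load_csv("iris.csv")
    for column in range(0, 4):
        norm.make_col_numeric(data, column)
        norm.norm_col_range(data, column, -1, 1)
    classes = norm.build_class_map(data, 4)
    norm.norm_col_one_of_n(data, 4, classes, 0, 1)
    Normalize.display_data(data)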
|
|
#!/usr/bin/env python
#
# Copyright (C) 2013-2016 DNAnexus, Inc.
#
# This file is part of dx-toolkit (DNAnexus platform client libraries).
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import (print_function, unicode_literals)
import sys, os
import json
import argparse
import re
from collections import OrderedDict
import dxpy
from dxpy.templating.utils import (print_intro, get_name, get_version, get_metadata, Completer, get_ordinal_str,
prompt_for_var, prompt_for_yn, use_completer, get_language, language_options,
get_pattern, get_timeout, fill_in_name_and_ver, clean, create_files_from_templates)
from dxpy.utils.printing import fill, BOLD, UNDERLINE, DNANEXUS_LOGO, ENDC
from dxpy.app_categories import APP_CATEGORIES
from dxpy.utils.completer import InstanceTypesCompleter
from dxpy.utils.pretty_print import format_table
from dxpy.compat import wrap_stdio_in_codecs
wrap_stdio_in_codecs()
try:
import colorama
colorama.init()
except:
pass
IO_NAME_PATTERN = re.compile('^[a-zA-Z_][0-9a-zA-Z_]*$')
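# For example, the pattern above accepts "reads_fastq" and "_tmp1" but rejects
# "1st_input" (leading digit) and "read-count" (hyphen).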
API_VERSION = '1.0.0'
parser = argparse.ArgumentParser(description="Create a source code directory for a DNAnexus app. You will be prompted for various metadata for the app as well as for its input and output specifications.")
parser.add_argument('--json-file', help='Use the metadata and IO spec found in the given file')
parser.add_argument('--language', help='Programming language of your app')
parser.add_argument('--template',
choices=["basic", "parallelized", "scatter-process-gather"], default='basic',
help='Execution pattern of your app')
parser.add_argument('name', help='Name of your app', nargs='?')
args = parser.parse_args()
if args.json_file is not None and not os.path.exists(args.json_file):
parser.error('File not found: ' + args.json_file)
def main(**kwargs):
"""
Entry point for dx-app-wizard.
Note that this function is not meant to be used as a subroutine in your program.
"""
manifest = []
print_intro(API_VERSION)
if args.json_file is not None:
with open(args.json_file, 'r') as json_file:
app_json = json.loads(json_file.read())
# Re-confirm the name
name = get_name(default=args.name or app_json.get('name'))
app_json['name'] = name
version = get_version(default=app_json.get('version'))
app_json['version'] = version
try:
os.mkdir(app_json['name'])
        except OSError:
sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % app_json['name']) + '\n')
sys.exit(1)
else:
##################
# BASIC METADATA #
##################
name = get_name(default=args.name)
try:
os.mkdir(name)
        except OSError:
sys.stderr.write(fill('''Unable to create a directory for %s, please check that it is a valid app name and the working directory exists and is writable.''' % name) + '\n')
sys.exit(1)
title, summary = get_metadata(API_VERSION)
version = get_version()
app_json = OrderedDict()
app_json["name"] = name
app_json["title"] = title or name
app_json['summary'] = summary or name
app_json["dxapi"] = API_VERSION
app_json["version"] = version
############
# IO SPECS #
############
class_completer = Completer(['int', 'float', 'string', 'boolean', 'hash',
'array:int', 'array:float', 'array:string', 'array:boolean',
'record', 'file', 'applet',
'array:record', 'array:file', 'array:applet'])
bool_completer = Completer(['true', 'false'])
print('')
print(BOLD() + 'Input Specification' + ENDC())
print('')
input_spec = True
input_names = []
printed_classes = False
if input_spec:
app_json['inputSpec'] = []
print(fill('You will now be prompted for each input parameter to your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['inputSpec']) + 1)
input_name = prompt_for_var(ordinal + ' input name (<ENTER> to finish)', allow_empty=True)
if input_name == '':
break
if input_name in input_names:
print(fill('Error: Cannot use the same input parameter name twice. Please choose again.'))
continue
if not IO_NAME_PATTERN.match(input_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue
input_names.append(input_name)
input_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your input parameter must be of one of the following classes:')
print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string
''')
printed_classes = True
while True:
input_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if input_class in class_completer.choices:
break
else:
print(fill('Not a recognized class; please choose again.'))
use_completer()
optional = prompt_for_yn('This is an optional parameter')
default_val = None
if optional and input_class in ['int', 'float', 'string', 'boolean']:
default_val = prompt_for_yn('A default value should be provided')
if default_val:
while True:
if input_class == 'boolean':
use_completer(bool_completer)
default_val = prompt_for_var(' Default value', choices=['true', 'false'])
use_completer()
elif input_class == 'string':
default_val = prompt_for_var(' Default value', allow_empty=True)
else:
default_val = prompt_for_var(' Default value')
try:
if input_class == 'boolean':
default_val = (default_val == 'true')
elif input_class == 'int':
default_val = int(default_val)
elif input_class == 'float':
default_val = float(default_val)
break
                        except ValueError:
print('Not a valid default value for the given class ' + input_class)
else:
default_val = None
# Fill in the input parameter's JSON
parameter_json = OrderedDict()
parameter_json["name"] = input_name
if input_label != '':
parameter_json['label'] = input_label
parameter_json["class"] = input_class
parameter_json["optional"] = optional
if default_val is not None:
parameter_json['default'] = default_val
# Fill in patterns and blank help string
if input_class == 'file' or input_class == 'array:file':
parameter_json["patterns"] = ["*"]
parameter_json["help"] = ""
app_json['inputSpec'].append(parameter_json)
print('')
print(BOLD() + 'Output Specification' + ENDC())
print('')
output_spec = True
output_names = []
if output_spec:
app_json['outputSpec'] = []
print(fill('You will now be prompted for each output parameter of your app. Each parameter should have a unique name that uses only the underscore "_" and alphanumeric characters, and does not start with a number.'))
while True:
print('')
ordinal = get_ordinal_str(len(app_json['outputSpec']) + 1)
output_name = prompt_for_var(ordinal + ' output name (<ENTER> to finish)', allow_empty=True)
if output_name == '':
break
if output_name in output_names:
print(fill('Error: Cannot use the same output parameter name twice. Please choose again.'))
continue
if not IO_NAME_PATTERN.match(output_name):
print(fill('Error: Parameter names may use only underscore "_", ASCII letters, and digits; and may not start with a digit. Please choose again.'))
continue
output_names.append(output_name)
output_label = prompt_for_var('Label (optional human-readable name)', '')
use_completer(class_completer)
if not printed_classes:
print('Your output parameter must be of one of the following classes:')
print('''applet array:file array:record file int
array:applet array:float array:string float record
array:boolean array:int boolean hash string''')
printed_classes = True
while True:
output_class = prompt_for_var('Choose a class (<TAB> twice for choices)')
if output_class in class_completer.choices:
break
else:
print(fill('Not a recognized class; please choose again.'))
use_completer()
# Fill in the output parameter's JSON
parameter_json = OrderedDict()
parameter_json["name"] = output_name
if output_label != '':
parameter_json['label'] = output_label
parameter_json["class"] = output_class
# Fill in patterns and blank help string
if output_class == 'file' or output_class == 'array:file':
parameter_json["patterns"] = ["*"]
parameter_json["help"] = ""
app_json['outputSpec'].append(parameter_json)
required_file_input_names = []
optional_file_input_names = []
required_file_array_input_names = []
optional_file_array_input_names = []
file_output_names = []
if 'inputSpec' in app_json:
for param in app_json['inputSpec']:
may_be_missing = param['optional'] and "default" not in param
if param['class'] == 'file':
param_list = optional_file_input_names if may_be_missing else required_file_input_names
elif param['class'] == 'array:file':
param_list = optional_file_array_input_names if may_be_missing else required_file_array_input_names
else:
param_list = None
if param_list is not None:
param_list.append(param['name'])
if 'outputSpec' in app_json:
file_output_names = [param['name'] for param in app_json['outputSpec'] if param['class'] == 'file']
##################
# TIMEOUT POLICY #
##################
print('')
print(BOLD() + 'Timeout Policy' + ENDC())
app_json["runSpec"] = OrderedDict({})
app_json['runSpec'].setdefault('timeoutPolicy', {})
timeout, timeout_units = get_timeout(default=app_json['runSpec']['timeoutPolicy'].get('*'))
app_json['runSpec']['timeoutPolicy'].setdefault('*', {})
app_json['runSpec']['timeoutPolicy']['*'].setdefault(timeout_units, timeout)
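    # At this point app_json['runSpec']['timeoutPolicy'] has the shape
    # {'*': {<units>: <value>}}, e.g. {'*': {'hours': 48}} if 48 hours was
    # entered at the prompt (the unit name comes from get_timeout()).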
########################
# LANGUAGE AND PATTERN #
########################
print('')
print(BOLD() + 'Template Options' + ENDC())
# Prompt for programming language if not specified
language = args.language if args.language is not None else get_language()
interpreter = language_options[language].get_interpreter()
app_json["runSpec"]["interpreter"] = interpreter
    # Prompt for the execution pattern only if the requested template is not
    # available for the chosen language
template_dir = os.path.join(os.path.dirname(dxpy.__file__), 'templating', 'templates', language_options[language].get_path())
if not os.path.isdir(os.path.join(template_dir, args.template)):
print(fill('The execution pattern "' + args.template + '" is not available for your programming language.'))
pattern = get_pattern(template_dir)
else:
pattern = args.template
template_dir = os.path.join(template_dir, pattern)
with open(os.path.join(template_dir, 'dxapp.json'), 'r') as template_app_json_file:
file_text = fill_in_name_and_ver(template_app_json_file.read(), name, version)
template_app_json = json.loads(file_text)
for key in template_app_json['runSpec']:
app_json['runSpec'][key] = template_app_json['runSpec'][key]
if (language == args.language) and (pattern == args.template):
print('All template options are supplied in the arguments.')
##########################
# APP ACCESS PERMISSIONS #
##########################
print('')
print(BOLD('Access Permissions'))
print(fill('''If you request these extra permissions for your app, users will see this fact when launching your app, and certain other restrictions will apply. For more information, see ''' +
BOLD('https://wiki.dnanexus.com/App-Permissions') + '.'))
print('')
print(fill(UNDERLINE('Access to the Internet') + ' (other than accessing the DNAnexus API).'))
if prompt_for_yn("Will this app need access to the Internet?", default=False):
app_json.setdefault('access', {})
app_json['access']['network'] = ['*']
print(fill('App has full access to the Internet. To narrow access to specific sites, edit the ' +
UNDERLINE('access.network') + ' field of dxapp.json once we generate the app.'))
print('')
print(fill(UNDERLINE('Direct access to the parent project') + '''. This is not needed if your app specifies outputs,
which will be copied into the project after it's done running.'''))
if prompt_for_yn("Will this app need access to the parent project?", default=False):
app_json.setdefault('access', {})
app_json['access']['project'] = 'CONTRIBUTE'
print(fill('App has CONTRIBUTE access to the parent project. To change the access level or request access to ' +
'other projects, edit the ' + UNDERLINE('access.project') + ' and ' + UNDERLINE('access.allProjects') +
' fields of dxapp.json once we generate the app.'))
#######################
# SYSTEM REQUIREMENTS #
#######################
print('')
print(BOLD('System Requirements'))
print('')
print(BOLD('Common instance types:'))
    print(format_table(InstanceTypesCompleter.preferred_instance_types.values(),
                       column_names=list(InstanceTypesCompleter.instance_types.values())[0]._fields))
print(fill(BOLD('Default instance type:') + ' The instance type you select here will apply to all entry points in ' +
'your app unless you override it. See ' +
BOLD('https://wiki.dnanexus.com/API-Specification-v1.0.0/Instance-Types') + ' for more information.'))
use_completer(InstanceTypesCompleter())
instance_type = prompt_for_var('Choose an instance type for your app',
default=InstanceTypesCompleter.default_instance_type.Name,
choices=list(InstanceTypesCompleter.instance_types))
app_json['runSpec'].setdefault('systemRequirements', {})
app_json['runSpec']['systemRequirements'].setdefault('*', {})
app_json['runSpec']['systemRequirements']['*']['instanceType'] = instance_type
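    # The resulting structure is
    # app_json['runSpec']['systemRequirements'] == {'*': {'instanceType': instance_type}},
    # i.e. a single default instance type applied to every entry point.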
######################
# HARDCODED DEFAULTS #
######################
# Default of no other authorizedUsers
# app_json['authorizedUsers'] = []
# print('\n' + BOLD('Linux version: '))
app_json['runSpec']['distribution'] = 'Ubuntu'
if any(instance_type.startswith(prefix) for prefix in ('mem1_hdd2', 'mem2_hdd2', 'mem3_hdd2')):
print(fill('Your app will run on Ubuntu 12.04. To use Ubuntu 14.04, select from the list of common instance ' +
'types above.'))
app_json['runSpec']['release'] = '12.04'
else:
app_json['runSpec']['release'] = '14.04'
print(fill('Your app has been configured to run on Ubuntu 14.04. To use Ubuntu 12.04, edit the ' +
BOLD('runSpec.release') + ' field of your dxapp.json.'))
#################
# WRITING FILES #
#################
print('')
print(BOLD() + '*** Generating ' + DNANEXUS_LOGO() + BOLD() + ' App Template... ***' + ENDC())
with open(os.path.join(name, 'dxapp.json'), 'w') as prog_file:
prog_file.write(clean(json.dumps(app_json, indent=2)) + '\n')
manifest.append(os.path.join(name, 'dxapp.json'))
print('')
print(fill('''Your app specification has been written to the
dxapp.json file. You can specify more app options by editing this file
directly (see https://wiki.dnanexus.com/Developer-Portal for complete
documentation).''' + (''' Note that without an input and output specification,
your app can only be built as an APPLET on the system. To publish it to
the DNAnexus community, you must first specify your inputs and outputs.
''' if not ('inputSpec' in app_json and 'outputSpec' in app_json) else "")))
print('')
for subdir in 'src', 'test', 'resources':
try:
os.mkdir(os.path.join(name, subdir))
manifest.append(os.path.join(name, subdir, ''))
        except OSError:
            sys.stderr.write("Unable to create subdirectory %s/%s\n" % (name, subdir))
sys.exit(1)
entry_points = ['main']
if pattern == 'parallelized':
entry_points = ['main', 'process', 'postprocess']
elif pattern == 'scatter-process-gather':
entry_points = ['main', 'scatter', 'map', 'process', 'postprocess']
manifest += create_files_from_templates(template_dir, app_json, language,
required_file_input_names, optional_file_input_names,
required_file_array_input_names, optional_file_array_input_names,
file_output_names, pattern,
description='<!-- Insert a description of your app here -->',
entry_points=entry_points)
print("Created files:")
for filename in sorted(manifest):
print("\t", filename)
print("\n" + fill('''App directory created! See
https://wiki.dnanexus.com/Developer-Portal for tutorials on how to modify these files,
or run "dx build {n}" or "dx build --create-app {n}" while logged in with dx.'''.format(n=name)) + "\n")
print(fill('''Running the DNAnexus build utility will create an executable on the DNAnexus platform. Any files found in the ''' +
BOLD() + 'resources' + ENDC() +
''' directory will be uploaded so that they will be present in the root directory when the executable is run.'''))
if __name__ == '__main__':
main()
|
|
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
import unittest
import os
from monty.serialization import loadfn
import warnings
import numpy as np
import multiprocessing
from pymatgen.analysis.pourbaix_diagram import PourbaixDiagram, PourbaixEntry,\
PourbaixPlotter, IonEntry, MultiEntry
from pymatgen.entries.computed_entries import ComputedEntry
from pymatgen.core.ion import Ion
from pymatgen import SETTINGS
test_dir = os.path.join(os.path.dirname(__file__), "..", "..", "..",
'test_files')
class PourbaixEntryTest(unittest.TestCase):
    """
    Test all functions using a fictitious entry
    """
    _multiprocess_shared_ = True
def setUp(self):
# comp = Composition("Mn2O3")
self.solentry = ComputedEntry("Mn2O3", 49)
ion = Ion.from_formula("MnO4-")
self.ionentry = IonEntry(ion, 25)
self.PxIon = PourbaixEntry(self.ionentry)
self.PxSol = PourbaixEntry(self.solentry)
self.PxIon.concentration = 1e-4
def test_pourbaix_entry(self):
self.assertEqual(self.PxIon.entry.energy, 25, "Wrong Energy!")
self.assertEqual(self.PxIon.entry.name,
"MnO4[-]", "Wrong Entry!")
self.assertEqual(self.PxSol.entry.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxSol.entry.name,
"Mn2O3", "Wrong Entry!")
# self.assertEqual(self.PxIon.energy, 25, "Wrong Energy!")
# self.assertEqual(self.PxSol.energy, 49, "Wrong Energy!")
self.assertEqual(self.PxIon.concentration, 1e-4, "Wrong concentration!")
def test_calc_coeff_terms(self):
self.assertEqual(self.PxIon.npH, -8, "Wrong npH!")
self.assertEqual(self.PxIon.nPhi, -7, "Wrong nPhi!")
self.assertEqual(self.PxIon.nH2O, 4, "Wrong nH2O!")
self.assertEqual(self.PxSol.npH, -6, "Wrong npH!")
self.assertEqual(self.PxSol.nPhi, -6, "Wrong nPhi!")
self.assertEqual(self.PxSol.nH2O, 3, "Wrong nH2O!")
def test_to_from_dict(self):
d = self.PxIon.as_dict()
ion_entry = self.PxIon.from_dict(d)
self.assertEqual(ion_entry.entry.name, "MnO4[-]", "Wrong Entry!")
d = self.PxSol.as_dict()
sol_entry = self.PxSol.from_dict(d)
self.assertEqual(sol_entry.name, "Mn2O3(s)", "Wrong Entry!")
self.assertEqual(sol_entry.energy, self.PxSol.energy,
"as_dict and from_dict energies unequal")
def test_energy_functions(self):
# TODO: test these for values
self.PxSol.energy_at_conditions(10, 0)
self.PxSol.energy_at_conditions(np.array([1, 2, 3]), 0)
self.PxSol.energy_at_conditions(10, np.array([1, 2, 3]))
self.PxSol.energy_at_conditions(np.array([1, 2, 3]),
np.array([1, 2, 3]))
def test_multi_entry(self):
# TODO: More robust multientry test
m_entry = MultiEntry([self.PxSol, self.PxIon])
for attr in ['energy', 'composition', 'nPhi']:
self.assertEqual(getattr(m_entry, attr),
getattr(self.PxSol, attr) + getattr(self.PxIon, attr))
# As dict, from dict
m_entry_dict = m_entry.as_dict()
m_entry_new = MultiEntry.from_dict(m_entry_dict)
self.assertEqual(m_entry_new.energy, m_entry.energy)
class PourbaixDiagramTest(unittest.TestCase):
_multiprocess_shared_ = True
@classmethod
def setUpClass(cls):
cls.test_data = loadfn(os.path.join(test_dir, 'pourbaix_test_data.json'))
cls.pbx = PourbaixDiagram(cls.test_data['Zn'], filter_solids=True)
cls.pbx_nofilter = PourbaixDiagram(cls.test_data['Zn'],
filter_solids=False)
def test_pourbaix_diagram(self):
self.assertEqual(set([e.name for e in self.pbx.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match")
self.assertEqual(set([e.name for e in self.pbx_nofilter.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)",
"ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match")
pbx_lowconc = PourbaixDiagram(self.test_data['Zn'], conc_dict={"Zn": 1e-8},
filter_solids=True)
self.assertEqual(set([e.name for e in pbx_lowconc.stable_entries]),
{"Zn(HO)2(aq)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"})
def test_properties(self):
self.assertEqual(len(self.pbx.unstable_entries), 2)
def test_multicomponent(self):
# Assure no ions get filtered at high concentration
ag_n = [e for e in self.test_data['Ag-Te-N']
if not "Te" in e.composition]
highconc = PourbaixDiagram(ag_n, filter_solids=True,
conc_dict={"Ag": 1e-5, "N": 1})
entry_sets = [set(e.entry_id) for e in highconc.stable_entries]
self.assertIn({"mp-124", "ion-17"}, entry_sets)
# Binary system
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8})
self.assertEqual(len(pd_binary.stable_entries), 30)
test_entry = pd_binary.find_stable_entry(8, 2)
self.assertTrue("mp-499" in test_entry.entry_id)
# Find a specific multientry to test
self.assertEqual(pd_binary.get_decomposition_energy(test_entry, 8, 2), 0)
self.assertEqual(pd_binary.get_decomposition_energy(
test_entry.entry_list[0], 8, 2), 0)
pd_ternary = PourbaixDiagram(self.test_data['Ag-Te-N'], filter_solids=True)
self.assertEqual(len(pd_ternary.stable_entries), 49)
ag = self.test_data['Ag-Te-N'][30]
self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag, 2, -1), 0)
self.assertAlmostEqual(pd_ternary.get_decomposition_energy(ag, 10, -2), 0)
# Test invocation of pourbaix diagram from ternary data
new_ternary = PourbaixDiagram(pd_ternary.all_entries)
self.assertEqual(len(new_ternary.stable_entries), 49)
self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag, 2, -1), 0)
self.assertAlmostEqual(new_ternary.get_decomposition_energy(ag, 10, -2), 0)
def test_get_pourbaix_domains(self):
domains = PourbaixDiagram.get_pourbaix_domains(self.test_data['Zn'])
self.assertEqual(len(domains[0]), 7)
def test_get_decomposition(self):
# Test a stable entry to ensure that it's zero in the stable region
entry = self.test_data['Zn'][12] # Should correspond to mp-2133
self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, 10, 1),
0.0, 5, "Decomposition energy of ZnO is not 0.")
# Test an unstable entry to ensure that it's never zero
entry = self.test_data['Zn'][11]
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-2, 4))
result = self.pbx_nofilter.get_decomposition_energy(entry, ph, v)
self.assertTrue((result >= 0).all(),
"Unstable energy has hull energy of 0 or less")
# Test an unstable hydride to ensure HER correction works
self.assertAlmostEqual(self.pbx.get_decomposition_energy(entry, -3, -2),
11.093744395)
# Test a list of pHs
self.pbx.get_decomposition_energy(entry, np.linspace(0, 2, 5), 2)
# Test a list of Vs
self.pbx.get_decomposition_energy(entry, 4, np.linspace(-3, 3, 10))
# Test a set of matching arrays
ph, v = np.meshgrid(np.linspace(0, 14), np.linspace(-3, 3))
self.pbx.get_decomposition_energy(entry, ph, v)
def test_multielement_parallel(self):
# Simple test to ensure that multiprocessing is working
test_entries = self.test_data["Ag-Te-N"]
nproc = multiprocessing.cpu_count()
pbx = PourbaixDiagram(test_entries, filter_solids=True, nproc=nproc)
self.assertEqual(len(pbx.stable_entries), 49)
@unittest.skipIf(not SETTINGS.get("PMG_MAPI_KEY"),
"PMG_MAPI_KEY environment variable not set.")
def test_mpr_pipeline(self):
from pymatgen import MPRester
mpr = MPRester()
data = mpr.get_pourbaix_entries(["Zn"])
pbx = PourbaixDiagram(data, filter_solids=True, conc_dict={"Zn": 1e-8})
pbx.find_stable_entry(10, 0)
data = mpr.get_pourbaix_entries(["Ag", "Te"])
pbx = PourbaixDiagram(data, filter_solids=True,
conc_dict={"Ag": 1e-8, "Te": 1e-8})
self.assertEqual(len(pbx.stable_entries), 30)
test_entry = pbx.find_stable_entry(8, 2)
self.assertAlmostEqual(test_entry.energy, 2.3894017960000009, 3)
# Test custom ions
entries = mpr.get_pourbaix_entries(["Sn", "C", "Na"])
ion = IonEntry(Ion.from_formula("NaO28H80Sn12C24+"), -161.676)
custom_ion_entry = PourbaixEntry(ion, entry_id='my_ion')
pbx = PourbaixDiagram(entries + [custom_ion_entry], filter_solids=True,
comp_dict={"Na": 1, "Sn": 12, "C": 24})
self.assertAlmostEqual(pbx.get_decomposition_energy(custom_ion_entry, 5, 2),
8.31202738629504, 1)
def test_nofilter(self):
entries = self.test_data['Ag-Te']
pbx = PourbaixDiagram(entries)
pbx.get_decomposition_energy(entries[0], 0, 0)
def test_solid_filter(self):
entries = self.test_data['Ag-Te-N']
pbx = PourbaixDiagram(entries, filter_solids=True)
pbx.get_decomposition_energy(entries[0], 0, 0)
def test_serialization(self):
d = self.pbx.as_dict()
new = PourbaixDiagram.from_dict(d)
self.assertEqual(set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)"},
"List of stable entries does not match")
        # Test with unprocessed entries included; this should result in the
# previously filtered entries being included
d = self.pbx.as_dict(include_unprocessed_entries=True)
new = PourbaixDiagram.from_dict(d)
self.assertEqual(
set([e.name for e in new.stable_entries]),
{"ZnO(s)", "Zn[2+]", "ZnHO2[-]", "ZnO2[2-]", "Zn(s)", "ZnO2(s)", "ZnH(s)"},
"List of stable entries for unfiltered pbx does not match")
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'], filter_solids=True,
comp_dict={"Ag": 0.5, "Te": 0.5},
conc_dict={"Ag": 1e-8, "Te": 1e-8})
new_binary = PourbaixDiagram.from_dict(pd_binary.as_dict())
self.assertEqual(len(pd_binary.stable_entries),
len(new_binary.stable_entries))
class PourbaixPlotterTest(unittest.TestCase):
def setUp(self):
warnings.simplefilter("ignore")
self.test_data = loadfn(os.path.join(test_dir, "pourbaix_test_data.json"))
self.pd = PourbaixDiagram(self.test_data["Zn"])
self.plotter = PourbaixPlotter(self.pd)
def tearDown(self):
warnings.simplefilter("default")
def test_plot_pourbaix(self):
plotter = PourbaixPlotter(self.pd)
# Default limits
plotter.get_pourbaix_plot()
# Non-standard limits
plotter.get_pourbaix_plot(limits=[[-5, 4], [-2, 2]])
def test_plot_entry_stability(self):
entry = self.pd.all_entries[0]
self.plotter.plot_entry_stability(entry, limits=[[-2, 14], [-3, 3]])
# binary system
pd_binary = PourbaixDiagram(self.test_data['Ag-Te'],
comp_dict = {"Ag": 0.5, "Te": 0.5})
binary_plotter = PourbaixPlotter(pd_binary)
test_entry = pd_binary._unprocessed_entries[0]
plt = binary_plotter.plot_entry_stability(test_entry)
plt.close()
if __name__ == '__main__':
unittest.main()
|
|
# -*- coding: utf-8 -*-
"""
sphinx.ext.todo
~~~~~~~~~~~~~~~
Allow todos to be inserted into your documentation. Inclusion of todos can
    be switched off by a configuration variable. The todolist directive collects
all todos of your project and lists them along with a backlink to the
original location.
:copyright: Copyright 2007-2016 by the Sphinx team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from docutils import nodes
from docutils.parsers.rst import directives
import sphinx
from sphinx.locale import _
from sphinx.environment import NoUri
from sphinx.util.nodes import set_source_info
from docutils.parsers.rst import Directive
from docutils.parsers.rst.directives.admonitions import BaseAdmonition
class todo_node(nodes.Admonition, nodes.Element):
pass
class todolist(nodes.General, nodes.Element):
pass
class Todo(BaseAdmonition):
"""
A todo entry, displayed (if configured) in the form of an admonition.
"""
node_class = todo_node
has_content = True
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {
'class': directives.class_option,
}
def run(self):
if not self.options.get('class'):
self.options['class'] = ['admonition-todo']
(todo,) = super(Todo, self).run()
if isinstance(todo, nodes.system_message):
return [todo]
todo.insert(0, nodes.title(text=_('Todo')))
set_source_info(self, todo)
env = self.state.document.settings.env
targetid = 'index-%s' % env.new_serialno('index')
targetnode = nodes.target('', '', ids=[targetid])
return [targetnode, todo]
def process_todos(app, doctree):
# collect all todos in the environment
    # this is not done in the directive itself because some transformations
# must have already been run, e.g. substitutions
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
for node in doctree.traverse(todo_node):
app.emit('todo-defined', node)
try:
targetnode = node.parent[node.parent.index(node) - 1]
if not isinstance(targetnode, nodes.target):
raise IndexError
except IndexError:
targetnode = None
newnode = node.deepcopy()
del newnode['ids']
env.todo_all_todos.append({
'docname': env.docname,
'source': node.source or env.doc2path(env.docname),
'lineno': node.line,
'todo': newnode,
'target': targetnode,
})
if env.config.todo_emit_warnings:
env.warn_node("TODO entry found: %s" % node[1].astext(), node)
class TodoList(Directive):
"""
A list of all todo entries.
"""
has_content = False
required_arguments = 0
optional_arguments = 0
final_argument_whitespace = False
option_spec = {}
def run(self):
# Simply insert an empty todolist node which will be replaced later
# when process_todo_nodes is called
return [todolist('')]
def process_todo_nodes(app, doctree, fromdocname):
if not app.config['todo_include_todos']:
for node in doctree.traverse(todo_node):
node.parent.remove(node)
# Replace all todolist nodes with a list of the collected todos.
# Augment each todo with a backlink to the original location.
env = app.builder.env
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
for node in doctree.traverse(todolist):
if not app.config['todo_include_todos']:
node.replace_self([])
continue
content = []
for todo_info in env.todo_all_todos:
para = nodes.paragraph(classes=['todo-source'])
if app.config['todo_link_only']:
description = _('<<original entry>>')
else:
description = (
_('(The <<original entry>> is located in %s, line %d.)') %
(todo_info['source'], todo_info['lineno'])
)
desc1 = description[:description.find('<<')]
desc2 = description[description.find('>>') + 2:]
para += nodes.Text(desc1, desc1)
# Create a reference
newnode = nodes.reference('', '', internal=True)
innernode = nodes.emphasis(_('original entry'), _('original entry'))
try:
newnode['refuri'] = app.builder.get_relative_uri(
fromdocname, todo_info['docname'])
newnode['refuri'] += '#' + todo_info['target']['refid']
except NoUri:
# ignore if no URI can be determined, e.g. for LaTeX output
pass
newnode.append(innernode)
para += newnode
para += nodes.Text(desc2, desc2)
# (Recursively) resolve references in the todo content
todo_entry = todo_info['todo']
env.resolve_references(todo_entry, todo_info['docname'],
app.builder)
# Insert into the todolist
content.append(todo_entry)
content.append(para)
node.replace_self(content)
def purge_todos(app, env, docname):
if not hasattr(env, 'todo_all_todos'):
return
env.todo_all_todos = [todo for todo in env.todo_all_todos
if todo['docname'] != docname]
def merge_info(app, env, docnames, other):
if not hasattr(other, 'todo_all_todos'):
return
if not hasattr(env, 'todo_all_todos'):
env.todo_all_todos = []
env.todo_all_todos.extend(other.todo_all_todos)
def visit_todo_node(self, node):
self.visit_admonition(node)
# self.visit_admonition(node, 'todo')
def depart_todo_node(self, node):
self.depart_admonition(node)
def setup(app):
app.add_event('todo-defined')
app.add_config_value('todo_include_todos', False, 'html')
app.add_config_value('todo_link_only', False, 'html')
app.add_config_value('todo_emit_warnings', False, 'html')
app.add_node(todolist)
app.add_node(todo_node,
html=(visit_todo_node, depart_todo_node),
latex=(visit_todo_node, depart_todo_node),
text=(visit_todo_node, depart_todo_node),
man=(visit_todo_node, depart_todo_node),
texinfo=(visit_todo_node, depart_todo_node))
app.add_directive('todo', Todo)
app.add_directive('todolist', TodoList)
app.connect('doctree-read', process_todos)
app.connect('doctree-resolved', process_todo_nodes)
app.connect('env-purge-doc', purge_todos)
app.connect('env-merge-info', merge_info)
return {'version': sphinx.__display_version__, 'parallel_read_safe': True}
|
|
from unittest.mock import patch
import json
import domain_api
from domain_api.epp.entity import EppRpcClient
from ..exceptions import EppError
from .test_setup import TestSetup
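# MockRpcClient overrides __init__ so that instantiating the client inside the
# code under test never opens a real RPC connection; the responses themselves
# are supplied per-test by patching EppRpcClient.call.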
class MockRpcClient(domain_api.epp.entity.EppRpcClient):
def __init__(self, host=None):
pass
class TestCheckDomain(TestSetup):
def setUp(self):
"""
Set up test suite
"""
super().setUp()
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_epp_error(self):
"""
An epp error should result in a 400 bad request.
"""
self.login_client()
with patch.object(EppRpcClient, 'call', side_effect=EppError("FAIL")):
response = self.client.get(
'/v1/domains/test-something.bar/'
)
self.assertEqual(response.status_code,
400,
"EPP error caused a 400 bad request.")
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_check_domain_response(self):
"""
EPP check domain result returns serialized json response.
"""
self.login_client()
return_data = {
"domain:chkData": {
"domain:cd": {
"domain:name": {
"avail": 1,
"$t": "whatever.ote"
}
}
}
}
with patch.object(EppRpcClient, 'call', return_value=return_data):
response = self.client.get(
'/v1/available/whatever.ote/'
)
self.assertEqual(response.status_code,
200,
"Epp returned normally")
data = response.data
self.assertTrue(data["available"],
"Serialised a check_domain response")
class TestInfoDomain(TestSetup):
"""
Test info domain functionality
"""
def setUp(self):
"""
Set up test suite
"""
super().setUp()
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_info_domain_response(self):
"""
Test processing of info domain response
"""
self.login_client()
return_value = {
"domain:infData": {
"domain:name": "whatever.ote",
"domain:status": "ok",
"domain:registrant": "R1234",
"domain:ns": [
{"domain:hostObj": "ns1.nameserver.com"},
{"domain:hostObj": "ns2.nameserver.com"}
],
"domain:contact": [
{"$t": "A1234", "type": "admin"},
{"$t": "T1234", "type": "tech"}
]
}
}
with patch.object(EppRpcClient, 'call', return_value=return_value):
response = self.client.get(
'/v1/domains/test-something.bar/'
)
self.assertEqual(response.status_code,
200,
"Epp returned normally")
class TestUpdateDomain(TestSetup):
"""
Test some commands to update domains
"""
def setUp(self):
"""
Set up test suite
"""
super().setUp()
def test_noop_update_domain(self):
jwt_header = self.api_login()
update_domain = {
"domain": "test-something.bar",
"registrant": 1,
"contacts": [{"admin": 3}, {"tech": 4}]
}
response = self.client.patch(
'/v1/domains/test-something.bar/',
data=json.dumps(update_domain),
content_type="application/json",
HTTP_AUTHORIZATION=jwt_header
)
self.assertEqual(response.status_code,
200,
"Epp returned normally")
data = response.data
self.assertEqual(data["msg"],
"No change to domain",
"Received a 'no change' response")
class TestContact(TestSetup):
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_info_contact(self):
"""
Test basic info contact
"""
info_contact_response = {
"contact:infData": {
"xmlns:contact": "urn:ietf:params:xml:ns:contact-1.0",
"xsi:schemaLocation": "urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd",
"contact:id": "contact-123",
"contact:roid": "78442-CoCCA",
"contact:status": [
{
"s": "linked",
"$t": "In use by 10 domains"
},
{
"s": "serverDeleteProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverTransferProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverUpdateProhibited",
"$t": "Server locked: This contact is a domain registrant"
}
],
"contact:postalInfo": {
"type": "loc",
"contact:name": "Tester MacTesterson",
"contact:addr": {
"contact:street": "Haribo",
"contact:city": "Munich",
"contact:sp": "Bayern",
"contact:pc": "48392",
"contact:cc": "DE"
}
},
"contact:voice": "+49.89444134",
"contact:email": "[email protected]",
"contact:clID": "catalyst_ote",
"contact:crID": "catalyst_ote",
"contact:crDate": "2017-03-03T10:06:33.063Z",
"contact:upDate": "2017-03-05T21:22:07.154Z",
"contact:upID": "catalyst_ote",
"contact:disclose": {
"flag": "0",
"contact:name": [{"type": "loc"}, {"type": "int"}],
"contact:org": [{"type": "loc"}, {"type": "int"}],
"contact:addr": [{"type": "loc"}, {"type": "int"}],
"contact:voice": {},
"contact:fax": {},
"contact:email": {}
}
}
}
jwt_header = self.api_login(username='testadmin',
password='1nn0vation')
print("JWT: %s" % jwt_header)
with patch.object(EppRpcClient,
'call',
return_value=info_contact_response):
response = self.client.get('/v1/contacts/contact-123/',
HTTP_AUTHORIZATION=jwt_header)
self.assertEqual(response.status_code,
200,
"Info contact returned normal response")
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_info_contact_non_owner(self):
"""
Test basic info contact for different user.
"""
info_contact_response = {
"contact:infData": {
"xmlns:contact": "urn:ietf:params:xml:ns:contact-1.0",
"xsi:schemaLocation": "urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd",
"contact:id": "contact-124",
"contact:roid": "78442-CoCCA",
"contact:status": [
{
"s": "linked",
"$t": "In use by 10 domains"
},
{
"s": "serverDeleteProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverTransferProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverUpdateProhibited",
"$t": "Server locked: This contact is a domain registrant"
}
],
"contact:postalInfo": {
"type": "loc",
"contact:name": "Tester MacTesterson",
"contact:addr": {
"contact:street": "Haribo",
"contact:city": "Munich",
"contact:sp": "Bayern",
"contact:pc": "48392",
"contact:cc": "DE"
}
},
"contact:voice": "+49.89444134",
"contact:email": "[email protected]",
"contact:clID": "catalyst_ote",
"contact:crID": "catalyst_ote",
"contact:crDate": "2017-03-03T10:06:33.063Z",
"contact:upDate": "2017-03-05T21:22:07.154Z",
"contact:upID": "catalyst_ote",
"contact:disclose": {
"flag": "0",
"contact:name": [{"type": "loc"}, {"type": "int"}],
"contact:org": [{"type": "loc"}, {"type": "int"}],
"contact:addr": [{"type": "loc"}, {"type": "int"}],
"contact:voice": {},
"contact:fax": {},
"contact:email": {}
}
}
}
jwt_header = self.api_login(username='testadmin',
password='1nn0vation')
with patch.object(EppRpcClient,
'call',
return_value=info_contact_response):
response = self.client.get('/v1/contacts/contact-321/',
HTTP_AUTHORIZATION=jwt_header)
self.assertEqual(response.status_code,
200,
"Info contact returned normal response")
class TestRegistrant(TestSetup):
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_info_registrant(self):
"""
Test basic info registrant
"""
self.login_client()
info_contact_response = {
"contact:infData": {
"xmlns:contact": "urn:ietf:params:xml:ns:contact-1.0",
"xsi:schemaLocation": "urn:ietf:params:xml:ns:contact-1.0 contact-1.0.xsd",
"contact:id": "registrant-123",
"contact:roid": "78442-CoCCA",
"contact:status": [
{
"s": "linked",
"$t": "In use by 10 domains"
},
{
"s": "serverDeleteProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverTransferProhibited",
"$t": "Server locked: This contact is a domain registrant"
},
{
"s": "serverUpdateProhibited",
"$t": "Server locked: This contact is a domain registrant"
}
],
"contact:postalInfo": {
"type": "loc",
"contact:name": "Tester MacTesterson",
"contact:addr": {
"contact:street": "Haribo",
"contact:city": "Munich",
"contact:sp": "Bayern",
"contact:pc": "48392",
"contact:cc": "DE"
}
},
"contact:voice": "+49.89444134",
"contact:email": "[email protected]",
"contact:clID": "catalyst_ote",
"contact:crID": "catalyst_ote",
"contact:crDate": "2017-03-03T10:06:33.063Z",
"contact:upDate": "2017-03-05T21:22:07.154Z",
"contact:upID": "catalyst_ote",
"contact:disclose": {
"flag": "0",
"contact:name": [ { "type": "loc" }, { "type": "int" } ],
"contact:org": [ { "type": "loc" }, { "type": "int" } ],
"contact:addr": [ { "type": "loc" }, { "type": "int" } ],
"contact:voice": {},
"contact:fax": {},
"contact:email": {}
}
}
}
with patch.object(EppRpcClient,
'call',
return_value=info_contact_response):
jwt_header = self.api_login()
response = self.client.get('/v1/registrants/registrant-123/',
content_type="application/json",
HTTP_AUTHORIZATION=jwt_header)
self.assertEqual(response.status_code,
200,
"Info contact returned normal response")
class TestBasicQueries(TestSetup):
def test_unauthenticated_endpoint_denied(self):
"""
Test accessing an endpoint without JWT is denied.
"""
response = self.client.get('/v1/account-details/1/')
self.assertEqual(response.status_code,
403,
"Not allowed to access endoint without JWT")
    def test_authenticated_endpoint_accepted(self):
"""
Test accessing an endpoint with JWT is allowed.
"""
jwt_header = self.api_login()
joes_id = self.joe_user.pk
path = "/v1/account-details/%s/" % joes_id
response = self.client.get(path,
HTTP_AUTHORIZATION=jwt_header)
self.assertEqual(response.status_code,
200,
"Allowed to request endpoint with JWT.")
def test_unauthorized_endpoint_denied(self):
"""
Test access to admin level object denied.
"""
jwt_header = self.api_login()
response = self.client.get('/v1/tld-providers/',
HTTP_AUTHORIZATION=jwt_header)
self.assertEqual(response.status_code,
403,
"Normal logged in user cannot access tld-provider")
@patch('domain_api.epp.entity.EppRpcClient', new=MockRpcClient)
def test_check_domain_response(self):
"""
Check domain using JWT to authenticate
"""
jwt_header = self.api_login()
return_data = {
"domain:chkData": {
"domain:cd": {
"domain:name": {
"avail": 1,
"$t": "whatever.ote"
}
}
}
}
with patch.object(EppRpcClient, 'call', return_value=return_data):
response = self.client.get(
'/v1/available/whatever.ote/',
HTTP_AUTHORIZATION=jwt_header
)
self.assertEqual(response.status_code,
200,
"Epp returned normally")
data = response.data
self.assertTrue(data["available"],
"Serialised a check_domain response")
|
|
#!/usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Javelin is a tool for creating, verifying, and deleting a small set of
resources in a declarative way.
Javelin is meant to be used as a way to validate quickly that resources can
survive an upgrade process.
Authentication
--------------
Javelin will be creating (and removing) users and tenants so it needs the admin
credentials of your cloud to operate properly. The corresponding info can be
given the usual way, either through CLI options or environment variables.
You're probably familiar with these, but just in case::
+----------+------------------+----------------------+
| Param | CLI | Environment Variable |
+----------+------------------+----------------------+
| Username | --os-username | OS_USERNAME |
| Password | --os-password | OS_PASSWORD |
| Tenant | --os-tenant-name | OS_TENANT_NAME |
+----------+------------------+----------------------+
Runtime Arguments
-----------------
**-m/--mode**: (Required) Has to be one of 'check', 'create' or 'destroy'. It
indicates which actions javelin is going to perform.
**-r/--resources**: (Required) The path to a YAML file describing the resources
used by Javelin.
**-d/--devstack-base**: (Required) The path to the devstack repo used to
retrieve artefacts (like images) that will be referenced in the resource files.
**-c/--config-file**: (Optional) The path to a valid Tempest config file
describing your cloud. Javelin may use this to determine if certain services
are enabled and modify its behavior accordingly.
Resource file
-------------
The resource file is a valid YAML file describing the resources that will be
created, checked and destroyed by javelin. Here's a canonical example of a
resource file::
tenants:
- javelin
- discuss
users:
- name: javelin
pass: gungnir
tenant: javelin
- name: javelin2
pass: gungnir2
tenant: discuss
# resources that we want to create
images:
- name: javelin_cirros
owner: javelin
file: cirros-0.3.2-x86_64-blank.img
disk_format: ami
container_format: ami
aki: cirros-0.3.2-x86_64-vmlinuz
ari: cirros-0.3.2-x86_64-initrd
servers:
- name: peltast
owner: javelin
flavor: m1.small
image: javelin_cirros
floating_ip_pool: public
- name: hoplite
owner: javelin
flavor: m1.medium
image: javelin_cirros
An important piece of the resource definition is the *owner* field, which is
the user (that we've created) that is the owner of that resource. All
operations on that resource will happen as that regular user to ensure that
admin level access does not mask issues.
The check phase will act like a unit test, using well known assert methods to
verify that the correct resources exist.
"""
import argparse
import collections
import datetime
import os
import sys
import unittest
import netaddr
from oslo_log import log as logging
from oslo_utils import timeutils
import six
import yaml
from tempest.common import identity
from tempest.common import waiters
from tempest import config
from tempest.lib import auth
from tempest.lib import exceptions as lib_exc
from tempest.lib.services.compute import flavors_client
from tempest.lib.services.compute import floating_ips_client
from tempest.lib.services.compute import security_group_rules_client
from tempest.lib.services.compute import security_groups_client
from tempest.lib.services.compute import servers_client
from tempest.lib.services.network import subnets_client
from tempest.services.identity.v2.json import identity_client
from tempest.services.identity.v2.json import roles_client
from tempest.services.identity.v2.json import tenants_client
from tempest.services.identity.v2.json import users_client
from tempest.services.image.v2.json import images_client
from tempest.services.network.json import network_client
from tempest.services.network.json import routers_client
from tempest.services.object_storage import container_client
from tempest.services.object_storage import object_client
from tempest.services.telemetry.json import alarming_client
from tempest.services.telemetry.json import telemetry_client
from tempest.services.volume.v1.json import volumes_client
CONF = config.CONF
OPTS = {}
USERS = {}
RES = collections.defaultdict(list)
LOG = None
JAVELIN_START = datetime.datetime.utcnow()
class OSClient(object):
_creds = None
identity = None
servers = None
def __init__(self, user, pw, tenant):
default_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
default_params_with_timeout_values = {
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
default_params_with_timeout_values.update(default_params)
compute_params = {
'service': CONF.compute.catalog_type,
'region': CONF.compute.region or CONF.identity.region,
'endpoint_type': CONF.compute.endpoint_type,
'build_interval': CONF.compute.build_interval,
'build_timeout': CONF.compute.build_timeout
}
compute_params.update(default_params)
object_storage_params = {
'service': CONF.object_storage.catalog_type,
'region': CONF.object_storage.region or CONF.identity.region,
'endpoint_type': CONF.object_storage.endpoint_type
}
object_storage_params.update(default_params)
_creds = auth.KeystoneV2Credentials(
username=user,
password=pw,
tenant_name=tenant)
auth_provider_params = {
'disable_ssl_certificate_validation':
CONF.identity.disable_ssl_certificate_validation,
'ca_certs': CONF.identity.ca_certificates_file,
'trace_requests': CONF.debug.trace_requests
}
_auth = auth.KeystoneV2AuthProvider(
_creds, CONF.identity.uri, **auth_provider_params)
self.identity = identity_client.IdentityClient(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.tenants = tenants_client.TenantsClient(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.roles = roles_client.RolesClient(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.users = users_client.UsersClient(
_auth,
CONF.identity.catalog_type,
CONF.identity.region,
endpoint_type='adminURL',
**default_params_with_timeout_values)
self.servers = servers_client.ServersClient(_auth,
**compute_params)
self.flavors = flavors_client.FlavorsClient(_auth,
**compute_params)
self.floating_ips = floating_ips_client.FloatingIPsClient(
_auth, **compute_params)
self.secgroups = security_groups_client.SecurityGroupsClient(
_auth, **compute_params)
self.secrules = security_group_rules_client.SecurityGroupRulesClient(
_auth, **compute_params)
self.objects = object_client.ObjectClient(_auth,
**object_storage_params)
self.containers = container_client.ContainerClient(
_auth, **object_storage_params)
self.images = images_client.ImagesClientV2(
_auth,
CONF.image.catalog_type,
CONF.image.region or CONF.identity.region,
endpoint_type=CONF.image.endpoint_type,
build_interval=CONF.image.build_interval,
build_timeout=CONF.image.build_timeout,
**default_params)
self.telemetry = telemetry_client.TelemetryClient(
_auth,
CONF.telemetry.catalog_type,
CONF.identity.region,
endpoint_type=CONF.telemetry.endpoint_type,
**default_params_with_timeout_values)
self.alarming = alarming_client.AlarmingClient(
_auth,
CONF.alarm.catalog_type,
CONF.identity.region,
endpoint_type=CONF.alarm.endpoint_type,
**default_params_with_timeout_values)
self.volumes = volumes_client.VolumesClient(
_auth,
CONF.volume.catalog_type,
CONF.volume.region or CONF.identity.region,
endpoint_type=CONF.volume.endpoint_type,
build_interval=CONF.volume.build_interval,
build_timeout=CONF.volume.build_timeout,
**default_params)
self.networks = network_client.NetworkClient(
_auth,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**default_params)
self.routers = routers_client.RoutersClient(
_auth,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**default_params)
self.subnets = subnets_client.SubnetsClient(
_auth,
CONF.network.catalog_type,
CONF.network.region or CONF.identity.region,
endpoint_type=CONF.network.endpoint_type,
build_interval=CONF.network.build_interval,
build_timeout=CONF.network.build_timeout,
**default_params)
def load_resources(fname):
    """Load the expected resources from a yaml file."""
    with open(fname, 'r') as f:
        return yaml.safe_load(f)
def keystone_admin():
return OSClient(OPTS.os_username, OPTS.os_password, OPTS.os_tenant_name)
def client_for_user(name):
LOG.debug("Entering client_for_user")
if name in USERS:
user = USERS[name]
LOG.debug("Created client for user %s" % user)
return OSClient(user['name'], user['pass'], user['tenant'])
else:
LOG.error("%s not found in USERS: %s" % (name, USERS))
###################
#
# TENANTS
#
###################
def create_tenants(tenants):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
admin = keystone_admin()
body = admin.tenants.list_tenants()['tenants']
existing = [x['name'] for x in body]
for tenant in tenants:
if tenant not in existing:
admin.tenants.create_tenant(tenant)['tenant']
else:
LOG.warning("Tenant '%s' already exists in this environment"
% tenant)
def destroy_tenants(tenants):
admin = keystone_admin()
for tenant in tenants:
        tenant_id = identity.get_tenant_by_name(admin.tenants, tenant)['id']
admin.tenants.delete_tenant(tenant_id)
##############
#
# USERS
#
##############
def _users_for_tenant(users, tenant):
u_for_t = []
for user in users:
for n in user:
if user[n]['tenant'] == tenant:
u_for_t.append(user[n])
return u_for_t
def _tenants_from_users(users):
tenants = set()
for user in users:
for n in user:
tenants.add(user[n]['tenant'])
return tenants
def _assign_swift_role(user, swift_role):
admin = keystone_admin()
roles = admin.roles.list_roles()
role = next(r for r in roles if r['name'] == swift_role)
LOG.debug(USERS[user])
try:
admin.roles.assign_user_role(
USERS[user]['tenant_id'],
USERS[user]['id'],
role['id'])
except lib_exc.Conflict:
# don't care if it's already assigned
pass
def create_users(users):
"""Create tenants from resource definition.
Don't create the tenants if they already exist.
"""
global USERS
LOG.info("Creating users")
admin = keystone_admin()
for u in users:
try:
tenant = identity.get_tenant_by_name(admin.tenants, u['tenant'])
except lib_exc.NotFound:
LOG.error("Tenant: %s - not found" % u['tenant'])
continue
try:
identity.get_user_by_username(admin.tenants,
tenant['id'], u['name'])
LOG.warning("User '%s' already exists in this environment"
% u['name'])
except lib_exc.NotFound:
admin.users.create_user(
u['name'], u['pass'], tenant['id'],
"%s@%s" % (u['name'], tenant['id']),
enabled=True)
def destroy_users(users):
admin = keystone_admin()
for user in users:
tenant_id = identity.get_tenant_by_name(admin.tenants,
user['tenant'])['id']
user_id = identity.get_user_by_username(admin.tenants,
tenant_id, user['name'])['id']
admin.users.delete_user(user_id)
def collect_users(users):
global USERS
LOG.info("Collecting users")
admin = keystone_admin()
for u in users:
tenant = identity.get_tenant_by_name(admin.tenants, u['tenant'])
u['tenant_id'] = tenant['id']
USERS[u['name']] = u
body = identity.get_user_by_username(admin.tenants,
tenant['id'], u['name'])
USERS[u['name']]['id'] = body['id']
class JavelinCheck(unittest.TestCase):
def __init__(self, users, resources):
super(JavelinCheck, self).__init__()
self.users = users
self.res = resources
def runTest(self, *args):
pass
def _ping_ip(self, ip_addr, count, namespace=None):
if namespace is None:
ping_cmd = "ping -c1 " + ip_addr
else:
ping_cmd = "sudo ip netns exec %s ping -c1 %s" % (namespace,
ip_addr)
for current in range(count):
return_code = os.system(ping_cmd)
            if return_code == 0:
break
self.assertNotEqual(current, count - 1,
"Server is not pingable at %s" % ip_addr)
def check(self):
self.check_users()
self.check_objects()
self.check_servers()
self.check_volumes()
self.check_telemetry()
self.check_secgroups()
# validate neutron is enabled and ironic disabled:
# Tenant network isolation is not supported when using ironic.
# "admin" has set up a neutron flat network environment within a shared
# fixed network for all tenants to use.
# In this case, network/subnet/router creation can be skipped and the
# server booted the same as nova network.
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
self.check_networking()
def check_users(self):
"""Check that the users we expect to exist, do.
We don't use the resource list for this because we need to validate
that things like tenantId didn't drift across versions.
"""
LOG.info("checking users")
for name, user in six.iteritems(self.users):
client = keystone_admin()
found = client.users.show_user(user['id'])['user']
self.assertEqual(found['name'], user['name'])
self.assertEqual(found['tenantId'], user['tenant_id'])
# also ensure we can auth with that user, and do something
# on the cloud. We don't care about the results except that it
# remains authorized.
client = client_for_user(user['name'])
client.servers.list_servers()
def check_objects(self):
"""Check that the objects created are still there."""
if not self.res.get('objects'):
return
LOG.info("checking objects")
for obj in self.res['objects']:
client = client_for_user(obj['owner'])
r, contents = client.objects.get_object(
obj['container'], obj['name'])
source = _file_contents(obj['file'])
self.assertEqual(contents, source)
def check_servers(self):
"""Check that the servers are still up and running."""
if not self.res.get('servers'):
return
LOG.info("checking servers")
for server in self.res['servers']:
client = client_for_user(server['owner'])
found = _get_server_by_name(client, server['name'])
self.assertIsNotNone(
found,
"Couldn't find expected server %s" % server['name'])
found = client.servers.show_server(found['id'])['server']
# validate neutron is enabled and ironic disabled:
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled):
_floating_is_alive = False
for network_name, body in found['addresses'].items():
for addr in body:
ip = addr['addr']
# Use floating IP, fixed IP or other type to
# reach the server.
# This is useful in multi-node environment.
if CONF.validation.connect_method == 'floating':
if addr.get('OS-EXT-IPS:type',
'floating') == 'floating':
self._ping_ip(ip, 60)
_floating_is_alive = True
elif CONF.validation.connect_method == 'fixed':
if addr.get('OS-EXT-IPS:type',
'fixed') == 'fixed':
namespace = _get_router_namespace(client,
network_name)
self._ping_ip(ip, 60, namespace)
else:
self._ping_ip(ip, 60)
# If CONF.validation.connect_method is floating, validate
# that the floating IP is attached to the server and the
                # server is pingable.
if CONF.validation.connect_method == 'floating':
self.assertTrue(_floating_is_alive,
"Server %s has no floating IP." %
server['name'])
else:
addr = found['addresses']['private'][0]['addr']
self._ping_ip(addr, 60)
def check_secgroups(self):
"""Check that the security groups still exist."""
LOG.info("Checking security groups")
for secgroup in self.res['secgroups']:
client = client_for_user(secgroup['owner'])
found = _get_resource_by_name(client.secgroups, 'security_groups',
secgroup['name'])
self.assertIsNotNone(
found,
"Couldn't find expected secgroup %s" % secgroup['name'])
def check_telemetry(self):
"""Check that ceilometer provides a sane sample.
Confirm that there is more than one sample and that they have the
expected metadata.
If in check mode confirm that the oldest sample available is from
before the upgrade.
"""
if not self.res.get('telemetry'):
return
LOG.info("checking telemetry")
for server in self.res['servers']:
client = client_for_user(server['owner'])
body = client.telemetry.list_samples(
'instance',
query=('metadata.display_name', 'eq', server['name'])
)
self.assertTrue(len(body) >= 1, 'expecting at least one sample')
self._confirm_telemetry_sample(server, body[-1])
def check_volumes(self):
"""Check that the volumes are still there and attached."""
if not self.res.get('volumes'):
return
LOG.info("checking volumes")
for volume in self.res['volumes']:
client = client_for_user(volume['owner'])
vol_body = _get_volume_by_name(client, volume['name'])
self.assertIsNotNone(
vol_body,
"Couldn't find expected volume %s" % volume['name'])
            # Verify that the volume's attachment can be retrieved
server_id = _get_server_by_name(client, volume['server'])['id']
attachment = client.volumes.get_attachment_from_volume(vol_body)
self.assertEqual(vol_body['id'], attachment['volume_id'])
self.assertEqual(server_id, attachment['server_id'])
def _confirm_telemetry_sample(self, server, sample):
"""Check this sample matches the expected resource metadata."""
# Confirm display_name
self.assertEqual(server['name'],
sample['resource_metadata']['display_name'])
# Confirm instance_type of flavor
flavor = sample['resource_metadata'].get(
'flavor.name',
sample['resource_metadata'].get('instance_type')
)
self.assertEqual(server['flavor'], flavor)
# Confirm the oldest sample was created before upgrade.
if OPTS.mode == 'check':
oldest_timestamp = timeutils.normalize_time(
timeutils.parse_isotime(sample['timestamp']))
self.assertTrue(
oldest_timestamp < JAVELIN_START,
'timestamp should come before start of second javelin run'
)
def check_networking(self):
"""Check that the networks are still there."""
for res_type in ('networks', 'subnets', 'routers'):
for res in self.res[res_type]:
client = client_for_user(res['owner'])
found = _get_resource_by_name(client.networks, res_type,
res['name'])
self.assertIsNotNone(
found,
"Couldn't find expected resource %s" % res['name'])
#######################
#
# OBJECTS
#
#######################
def _file_contents(fname):
with open(fname, 'r') as f:
return f.read()
def create_objects(objects):
if not objects:
return
LOG.info("Creating objects")
for obj in objects:
LOG.debug("Object %s" % obj)
swift_role = obj.get('swift_role', 'Member')
_assign_swift_role(obj['owner'], swift_role)
client = client_for_user(obj['owner'])
client.containers.create_container(obj['container'])
client.objects.create_object(
obj['container'], obj['name'],
_file_contents(obj['file']))
def destroy_objects(objects):
for obj in objects:
client = client_for_user(obj['owner'])
r, body = client.objects.delete_object(obj['container'], obj['name'])
        if not (200 <= int(r['status']) < 300):
raise ValueError("unable to destroy object: [%s] %s" % (r, body))
#######################
#
# IMAGES
#
#######################
def _resolve_image(image, imgtype):
name = image[imgtype]
fname = os.path.join(OPTS.devstack_base, image['imgdir'], name)
return name, fname
def _get_image_by_name(client, name):
body = client.images.list_images()
for image in body:
if name == image['name']:
return image
return None
def create_images(images):
if not images:
return
LOG.info("Creating images")
for image in images:
client = client_for_user(image['owner'])
# DEPRECATED: 'format' was used for ami images
# Use 'disk_format' and 'container_format' instead
if 'format' in image:
LOG.warning("Deprecated: 'format' is deprecated for images "
"description. Please use 'disk_format' and 'container_"
"format' instead.")
image['disk_format'] = image['format']
image['container_format'] = image['format']
# only upload a new image if the name isn't there
if _get_image_by_name(client, image['name']):
LOG.info("Image '%s' already exists" % image['name'])
continue
# special handling for 3 part image
extras = {}
if image['disk_format'] == 'ami':
name, fname = _resolve_image(image, 'aki')
aki = client.images.create_image(
'javelin_' + name, 'aki', 'aki')
client.images.store_image_file(aki.get('id'), open(fname, 'r'))
extras['kernel_id'] = aki.get('id')
name, fname = _resolve_image(image, 'ari')
ari = client.images.create_image(
'javelin_' + name, 'ari', 'ari')
client.images.store_image_file(ari.get('id'), open(fname, 'r'))
extras['ramdisk_id'] = ari.get('id')
_, fname = _resolve_image(image, 'file')
body = client.images.create_image(
image['name'], image['container_format'],
image['disk_format'], **extras)
image_id = body.get('id')
client.images.store_image_file(image_id, open(fname, 'r'))
def destroy_images(images):
if not images:
return
LOG.info("Destroying images")
for image in images:
client = client_for_user(image['owner'])
response = _get_image_by_name(client, image['name'])
if not response:
LOG.info("Image '%s' does not exist" % image['name'])
continue
client.images.delete_image(response['id'])
#######################
#
# NETWORKS
#
#######################
def _get_router_namespace(client, network):
network_id = _get_resource_by_name(client.networks,
'networks', network)['id']
n_body = client.routers.list_routers()
for router in n_body['routers']:
router_id = router['id']
r_body = client.networks.list_router_interfaces(router_id)
for port in r_body['ports']:
if port['network_id'] == network_id:
return "qrouter-%s" % router_id
def _get_resource_by_name(client, resource, name):
get_resources = getattr(client, 'list_%s' % resource)
if get_resources is None:
raise AttributeError("client doesn't have method list_%s" % resource)
    # Until all tempest client methods are changed to return only one value,
    # we cannot assume they all have the same signature, so we need to discard
    # the unused first (response) value if two values are being returned.
body = get_resources()
if isinstance(body, tuple):
body = body[1]
if isinstance(body, dict):
body = body[resource]
for res in body:
if name == res['name']:
return res
raise ValueError('%s not found in %s resources' % (name, resource))
def create_networks(networks):
LOG.info("Creating networks")
for network in networks:
client = client_for_user(network['owner'])
# only create a network if the name isn't here
body = client.networks.list_networks()
if any(item['name'] == network['name'] for item in body['networks']):
LOG.warning("Duplicated network name: %s" % network['name'])
continue
client.networks.create_network(name=network['name'])
def destroy_networks(networks):
LOG.info("Destroying subnets")
for network in networks:
client = client_for_user(network['owner'])
network_id = _get_resource_by_name(client.networks, 'networks',
network['name'])['id']
client.networks.delete_network(network_id)
def create_subnets(subnets):
LOG.info("Creating subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
network = _get_resource_by_name(client.networks, 'networks',
subnet['network'])
ip_version = netaddr.IPNetwork(subnet['range']).version
# ensure we don't overlap with another subnet in the network
try:
client.networks.create_subnet(network_id=network['id'],
cidr=subnet['range'],
name=subnet['name'],
ip_version=ip_version)
except lib_exc.BadRequest as e:
is_overlapping_cidr = 'overlaps with another subnet' in str(e)
if not is_overlapping_cidr:
raise
def destroy_subnets(subnets):
LOG.info("Destroying subnets")
for subnet in subnets:
client = client_for_user(subnet['owner'])
subnet_id = _get_resource_by_name(client.subnets,
'subnets', subnet['name'])['id']
client.subnets.delete_subnet(subnet_id)
def create_routers(routers):
LOG.info("Creating routers")
for router in routers:
client = client_for_user(router['owner'])
# only create a router if the name isn't here
body = client.routers.list_routers()
if any(item['name'] == router['name'] for item in body['routers']):
LOG.warning("Duplicated router name: %s" % router['name'])
continue
client.networks.create_router(router['name'])
def destroy_routers(routers):
LOG.info("Destroying routers")
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
client.routers.remove_router_interface(router_id,
subnet_id=subnet_id)
client.routers.delete_router(router_id)
def add_router_interface(routers):
for router in routers:
client = client_for_user(router['owner'])
router_id = _get_resource_by_name(client.networks,
'routers', router['name'])['id']
for subnet in router['subnet']:
subnet_id = _get_resource_by_name(client.networks,
'subnets', subnet)['id']
# connect routers to their subnets
client.routers.add_router_interface(router_id,
subnet_id=subnet_id)
# connect routers to external network if set to "gateway"
if router['gateway']:
if CONF.network.public_network_id:
ext_net = CONF.network.public_network_id
client.routers._update_router(
router_id, set_enable_snat=True,
external_gateway_info={"network_id": ext_net})
else:
raise ValueError('public_network_id is not configured.')
#######################
#
# SERVERS
#
#######################
def _get_server_by_name(client, name):
body = client.servers.list_servers()
for server in body['servers']:
if name == server['name']:
return server
return None
def _get_flavor_by_name(client, name):
body = client.flavors.list_flavors()['flavors']
for flavor in body:
if name == flavor['name']:
return flavor
return None
def create_servers(servers):
if not servers:
return
LOG.info("Creating servers")
for server in servers:
client = client_for_user(server['owner'])
if _get_server_by_name(client, server['name']):
LOG.info("Server '%s' already exists" % server['name'])
continue
image_id = _get_image_by_name(client, server['image'])['id']
flavor_id = _get_flavor_by_name(client, server['flavor'])['id']
# validate neutron is enabled and ironic disabled
kwargs = dict()
if (CONF.service_available.neutron and
not CONF.baremetal.driver_enabled and server.get('networks')):
get_net_id = lambda x: (_get_resource_by_name(
client.networks, 'networks', x)['id'])
kwargs['networks'] = [{'uuid': get_net_id(network)}
for network in server['networks']]
body = client.servers.create_server(
name=server['name'], imageRef=image_id, flavorRef=flavor_id,
**kwargs)['server']
server_id = body['id']
client.servers.wait_for_server_status(server_id, 'ACTIVE')
# create security group(s) after server spawning
for secgroup in server['secgroups']:
client.servers.add_security_group(server_id, name=secgroup)
if CONF.validation.connect_method == 'floating':
floating_ip_pool = server.get('floating_ip_pool')
floating_ip = client.floating_ips.create_floating_ip(
pool_name=floating_ip_pool)['floating_ip']
client.floating_ips.associate_floating_ip_to_server(
floating_ip['ip'], server_id)
def destroy_servers(servers):
if not servers:
return
LOG.info("Destroying servers")
for server in servers:
client = client_for_user(server['owner'])
response = _get_server_by_name(client, server['name'])
if not response:
LOG.info("Server '%s' does not exist" % server['name'])
continue
# TODO(EmilienM): disassociate floating IP from server and release it.
client.servers.delete_server(response['id'])
waiters.wait_for_server_termination(client.servers, response['id'],
ignore_error=True)
def create_secgroups(secgroups):
LOG.info("Creating security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
        # only create a security group if the name isn't here
        # i.e. a security group may be used by another server
body = client.secgroups.list_security_groups()['security_groups']
if any(item['name'] == secgroup['name'] for item in body):
LOG.warning("Security group '%s' already exists" %
secgroup['name'])
continue
body = client.secgroups.create_security_group(
name=secgroup['name'],
description=secgroup['description'])['security_group']
secgroup_id = body['id']
# for each security group, create the rules
for rule in secgroup['rules']:
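            # each rule is assumed to be a whitespace-separated string of
            # "<ip_proto> <from_port> <to_port> <cidr>",
            # e.g. "tcp 22 22 0.0.0.0/0" (illustrative values only)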
ip_proto, from_port, to_port, cidr = rule.split()
client.secrules.create_security_group_rule(
parent_group_id=secgroup_id, ip_protocol=ip_proto,
from_port=from_port, to_port=to_port, cidr=cidr)
def destroy_secgroups(secgroups):
LOG.info("Destroying security groups")
for secgroup in secgroups:
client = client_for_user(secgroup['owner'])
sg_id = _get_resource_by_name(client.secgroups,
'security_groups',
secgroup['name'])
# sg rules are deleted automatically
client.secgroups.delete_security_group(sg_id['id'])
#######################
#
# VOLUMES
#
#######################
def _get_volume_by_name(client, name):
body = client.volumes.list_volumes()['volumes']
for volume in body:
if name == volume['display_name']:
return volume
return None
def create_volumes(volumes):
if not volumes:
return
LOG.info("Creating volumes")
for volume in volumes:
client = client_for_user(volume['owner'])
# only create a volume if the name isn't here
if _get_volume_by_name(client, volume['name']):
LOG.info("volume '%s' already exists" % volume['name'])
continue
size = volume['gb']
v_name = volume['name']
body = client.volumes.create_volume(size=size,
display_name=v_name)['volume']
client.volumes.wait_for_volume_status(body['id'], 'available')
def destroy_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
volume_id = _get_volume_by_name(client, volume['name'])['id']
client.volumes.detach_volume(volume_id)
client.volumes.delete_volume(volume_id)
def attach_volumes(volumes):
for volume in volumes:
client = client_for_user(volume['owner'])
server_id = _get_server_by_name(client, volume['server'])['id']
volume_id = _get_volume_by_name(client, volume['name'])['id']
device = volume['device']
client.volumes.attach_volume(volume_id,
instance_uuid=server_id,
mountpoint=device)
#######################
#
# MAIN LOGIC
#
#######################
def create_resources():
LOG.info("Creating Resources")
# first create keystone level resources, and we need to be admin
# for this.
create_tenants(RES['tenants'])
create_users(RES['users'])
collect_users(RES['users'])
# next create resources in a well known order
create_objects(RES['objects'])
create_images(RES['images'])
# validate neutron is enabled and ironic is disabled
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
create_networks(RES['networks'])
create_subnets(RES['subnets'])
create_routers(RES['routers'])
add_router_interface(RES['routers'])
create_secgroups(RES['secgroups'])
create_volumes(RES['volumes'])
# Only attempt attaching the volumes if servers are defined in the
# resource file
if 'servers' in RES:
create_servers(RES['servers'])
attach_volumes(RES['volumes'])
def destroy_resources():
LOG.info("Destroying Resources")
# Destroy in inverse order of create
destroy_servers(RES['servers'])
destroy_images(RES['images'])
destroy_objects(RES['objects'])
destroy_volumes(RES['volumes'])
if CONF.service_available.neutron and not CONF.baremetal.driver_enabled:
destroy_routers(RES['routers'])
destroy_subnets(RES['subnets'])
destroy_networks(RES['networks'])
destroy_secgroups(RES['secgroups'])
destroy_users(RES['users'])
destroy_tenants(RES['tenants'])
LOG.warning("Destroy mode incomplete")
def get_options():
global OPTS
parser = argparse.ArgumentParser(
description='Create and validate a fixed set of OpenStack resources')
parser.add_argument('-m', '--mode',
metavar='<create|check|destroy>',
required=True,
help=('One of (create, check, destroy)'))
parser.add_argument('-r', '--resources',
required=True,
metavar='resourcefile.yaml',
help='Resources definition yaml file')
parser.add_argument(
'-d', '--devstack-base',
required=True,
metavar='/opt/stack/old',
help='Devstack base directory for retrieving artifacts')
parser.add_argument(
'-c', '--config-file',
metavar='/etc/tempest.conf',
help='path to javelin2(tempest) config file')
# auth bits, letting us also just source the devstack openrc
parser.add_argument('--os-username',
metavar='<auth-user-name>',
default=os.environ.get('OS_USERNAME'),
help=('Defaults to env[OS_USERNAME].'))
parser.add_argument('--os-password',
metavar='<auth-password>',
default=os.environ.get('OS_PASSWORD'),
help=('Defaults to env[OS_PASSWORD].'))
parser.add_argument('--os-tenant-name',
metavar='<auth-tenant-name>',
default=os.environ.get('OS_TENANT_NAME'),
help=('Defaults to env[OS_TENANT_NAME].'))
OPTS = parser.parse_args()
if OPTS.mode not in ('create', 'check', 'destroy'):
print("ERROR: Unknown mode -m %s\n" % OPTS.mode)
parser.print_help()
sys.exit(1)
if OPTS.config_file:
config.CONF.set_config_path(OPTS.config_file)
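# Illustrative invocation only (the script/file name and values are assumptions,
# not taken from the original source); the flags are those defined in
# get_options() above:
#   python javelin.py -m create -r resources.yaml -d /opt/stack/old \
#       --os-username admin --os-password secret --os-tenant-name admin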
def setup_logging():
global LOG
logging.setup(CONF, __name__)
LOG = logging.getLogger(__name__)
def main():
print("Javelin is deprecated and will be removed from Tempest in the "
"future.")
global RES
get_options()
setup_logging()
RES.update(load_resources(OPTS.resources))
if OPTS.mode == 'create':
create_resources()
# Make sure the resources we just created actually work
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'check':
collect_users(RES['users'])
checker = JavelinCheck(USERS, RES)
checker.check()
elif OPTS.mode == 'destroy':
collect_users(RES['users'])
destroy_resources()
else:
LOG.error('Unknown mode %s' % OPTS.mode)
return 1
LOG.info('javelin2 successfully finished')
return 0
if __name__ == "__main__":
sys.exit(main())
|
|
"""Returns tidy3d simulation from gdsfactory Component."""
import warnings
from typing import Dict, Optional
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import pydantic
import tidy3d as td
import gdsfactory as gf
from gdsfactory.component import Component
from gdsfactory.components.extension import move_polar_rad_copy
from gdsfactory.config import logger
from gdsfactory.routing.sort_ports import sort_ports_x, sort_ports_y
from gdsfactory.simulation.gtidy3d.materials import (
MATERIAL_NAME_TO_TIDY3D_INDEX,
MATERIAL_NAME_TO_TIDY3D_NAME,
get_index,
get_medium,
)
from gdsfactory.tech import LAYER_STACK, LayerStack
from gdsfactory.types import ComponentOrFactory, Float2
@pydantic.validate_arguments
def get_simulation(
component: ComponentOrFactory,
port_extension: Optional[float] = 4.0,
layer_stack: LayerStack = LAYER_STACK,
thickness_pml: float = 1.0,
xmargin: float = 0,
ymargin: float = 0,
xmargin_left: float = 0,
xmargin_right: float = 0,
ymargin_top: float = 0,
ymargin_bot: float = 0,
zmargin: float = 1.0,
clad_material: str = "sio2",
port_source_name: str = "o1",
port_margin: float = 0.5,
port_source_offset: float = 0.1,
distance_source_to_monitors: float = 0.2,
resolution: float = 50,
wavelength_start: float = 1.50,
wavelength_stop: float = 1.60,
wavelength_points: int = 50,
plot_modes: bool = False,
num_modes: int = 2,
run_time_ps: float = 10.0,
dispersive: bool = False,
material_name_to_tidy3d_index: Dict[str, float] = MATERIAL_NAME_TO_TIDY3D_INDEX,
material_name_to_tidy3d_name: Dict[str, str] = MATERIAL_NAME_TO_TIDY3D_NAME,
is_3d: bool = True,
with_all_monitors: bool = False,
) -> td.Simulation:
r"""Returns Simulation object from gdsfactory.component
based on GDS example
https://simulation.cloud/docs/html/examples/ParameterScan.html
.. code::
top view
________________________________
| |
| xmargin_left | port_extension
|<------> port_margin ||<-->
___|___________ _________||___
| \ / |
| \ / |
| ====== |
| / \ |
___|___________/ \__________|___
| | <-------->|
| |ymargin_bot xmargin_right|
| | |
|___|___________________________|
side view
________________________________
| | |
| | |
| zmargin_top |
|xmargin_left | |
|<---> _____ _|___ |
| | | | | |
| | | | | |
| |_____| |_____| |
| | |
| | |
| |zmargin_bot |
| | |
|_______|_______________________|
Args:
component: gdsfactory Component.
port_extension: extend ports beyond the PML.
layer_stack: contains layer numbers (int, int) to thickness, zmin.
thickness_pml: PML thickness (um).
xmargin: left/right distance from component to PML.
xmargin_left: left distance from component to PML.
xmargin_right: right distance from component to PML.
        ymargin: top/bottom distance from component to PML.
ymargin_top: top distance from component to PML.
ymargin_bot: bottom distance from component to PML.
zmargin: thickness for cladding above and below core.
clad_material: material for cladding.
port_source_name: input port name.
port_margin: margin on each side of the port.
        distance_source_to_monitors: distance (um) between the source and the
            monitors; the source is placed before the monitors.
        port_source_offset: mode solver workaround.
            Positive moves the source forward, negative moves it backward.
        resolution: grid resolution in pixels/um (20 for coarse, 120 for fine).
wavelength_start: in (um).
wavelength_stop: in (um).
wavelength_points: number of wavelengths.
plot_modes: plot source modes.
num_modes: number of modes to plot.
run_time_ps: make sure it's sufficient for the fields to decay.
defaults to 10ps and counts on automatic shutoff to stop earlier if needed.
        dispersive: False uses constant refractive index materials.
            True uses wavelength-dependent materials.
            Dispersive materials require more computation.
        material_name_to_tidy3d_index: maps non-dispersive material names to a
            constant refractive index.
        material_name_to_tidy3d_name: maps layer_stack material names to tidy3d
            material database names, which have a wavelength-dependent index.
is_3d: if False, does not consider Z dimension for faster simulations.
with_all_monitors: if True, includes field monitors which increase results file size.
.. code::
import matplotlib.pyplot as plt
import gdsfactory as gf
import gdsfactory.simulation.tidy3d as gt
c = gf.components.bend_circular()
sim = gt.get_simulation(c)
gt.plot_simulation(sim)
"""
component = component() if callable(component) else component
assert isinstance(component, Component)
layer_to_thickness = layer_stack.get_layer_to_thickness()
layer_to_material = layer_stack.get_layer_to_material()
layer_to_zmin = layer_stack.get_layer_to_zmin()
# layer_to_sidewall_angle = layer_stack.get_layer_to_sidewall_angle()
if dispersive:
material_name_to_tidy3d = material_name_to_tidy3d_name
else:
material_name_to_tidy3d = material_name_to_tidy3d_index
assert isinstance(
component, Component
), f"component needs to be a gf.Component, got Type {type(component)}"
if port_source_name not in component.ports:
warnings.warn(
f"port_source_name={port_source_name} not in {component.ports.keys()}"
)
port_source = component.get_ports_list(port_type="optical")[0]
port_source_name = port_source.name
warnings.warn(f"Selecting port_source_name={port_source_name} instead.")
component_padding = gf.add_padding_container(
component,
default=0,
top=ymargin or ymargin_top,
bottom=ymargin or ymargin_bot,
left=xmargin or xmargin_left,
right=xmargin or xmargin_right,
)
component_extended = (
gf.components.extension.extend_ports(
component=component_padding, length=port_extension, centered=True
)
if port_extension
else component_padding
)
gf.show(component_extended)
component_extended = component_extended.flatten()
component_ref = component_padding.ref()
component_ref.x = 0
component_ref.y = 0
clad_material_name_or_index = material_name_to_tidy3d[clad_material]
clad = td.Structure(
geometry=td.Box(
size=(td.inf, td.inf, td.inf),
center=(0, 0, 0),
),
medium=get_medium(name_or_index=clad_material_name_or_index),
)
structures = [clad]
layers_thickness = [
layer_to_thickness[layer]
for layer in component.get_layers()
if layer in layer_to_thickness
]
t_core = max(layers_thickness)
cell_thickness = (
thickness_pml + t_core + thickness_pml + 2 * zmargin
if is_3d
else 1 / resolution
)
sim_size = [
component_ref.xsize + 2 * thickness_pml,
component_ref.ysize + 2 * thickness_pml,
cell_thickness,
]
for layer in component.layers:
if layer in layer_to_thickness and layer in layer_to_material:
thickness = layer_to_thickness[layer]
zmin = layer_to_zmin[layer] if is_3d else -td.inf
zmax = zmin + thickness if is_3d else td.inf
if (
layer in layer_to_material
and layer_to_material[layer] in material_name_to_tidy3d
):
name_or_index = material_name_to_tidy3d[layer_to_material[layer]]
medium = get_medium(name_or_index=name_or_index)
index = get_index(name_or_index=name_or_index)
logger.debug(
f"Add {layer}, {name_or_index!r}, index = {index:.3f}, "
f"thickness = {thickness}, zmin = {zmin}, zmax = {zmax}"
)
polygons = td.PolySlab.from_gds(
gds_cell=component_extended,
gds_layer=layer[0],
gds_dtype=layer[1],
axis=2,
slab_bounds=(zmin, zmax),
)
for polygon in polygons:
geometry = td.Structure(
geometry=polygon,
medium=medium,
)
structures.append(geometry)
elif layer not in layer_to_material:
logger.debug(f"Layer {layer} not in {layer_to_material.keys()}")
elif layer_to_material[layer] not in material_name_to_tidy3d:
materials = list(material_name_to_tidy3d.keys())
logger.debug(f"material {layer_to_material[layer]} not in {materials}")
# Add source
port = component_ref.ports[port_source_name]
angle = port.orientation
width = port.width + 2 * port_margin
size_x = width * abs(np.sin(angle * np.pi / 180))
size_y = width * abs(np.cos(angle * np.pi / 180))
size_x = 0 if size_x < 0.001 else size_x
size_y = 0 if size_y < 0.001 else size_y
size_z = cell_thickness - 2 * zmargin if is_3d else td.inf
source_size = [size_x, size_y, size_z]
source_center = port.center.tolist() + [0] # (x, y, z=0)
xy_shifted = move_polar_rad_copy(
np.array(port.center), angle=angle * np.pi / 180, length=port_source_offset
)
source_center_offset = xy_shifted.tolist() + [0] # (x, y, z=0)
wavelengths = np.linspace(wavelength_start, wavelength_stop, wavelength_points)
freqs = td.constants.C_0 / wavelengths
freq0 = td.constants.C_0 / np.mean(wavelengths)
fwidth = freq0 / 10
msource = td.ModeSource(
size=source_size,
center=source_center,
source_time=td.GaussianPulse(freq0=freq0, fwidth=fwidth),
direction="+",
)
# Add port monitors
monitors = {}
ports = sort_ports_x(sort_ports_y(component_ref.get_ports_list()))
for port in ports:
port_name = port.name
angle = port.orientation
width = port.width + 2 * port_margin
size_x = width * abs(np.sin(angle * np.pi / 180))
size_y = width * abs(np.cos(angle * np.pi / 180))
size_x = 0 if size_x < 0.001 else size_x
size_y = 0 if size_y < 0.001 else size_y
size = (size_x, size_y, size_z)
# if monitor has a source move monitor inwards
length = -distance_source_to_monitors if port_name == port_source_name else 0
xy_shifted = move_polar_rad_copy(
np.array(port.center), angle=angle * np.pi / 180, length=length
)
center = xy_shifted.tolist() + [0] # (x, y, z=0)
monitors[port_name] = td.ModeMonitor(
center=center,
size=size,
freqs=freqs,
mode_spec=td.ModeSpec(num_modes=1),
name=port.name,
)
zcenter = (zmax + zmin) / 2 if is_3d else 0
domain_monitor = td.FieldMonitor(
center=[0, 0, zcenter],
size=[sim_size[0], sim_size[1], 0] if is_3d else [td.inf, td.inf, 0],
freqs=[freq0],
name="field",
)
monitors = list(monitors.values())
monitors += [domain_monitor] if with_all_monitors else []
sim = td.Simulation(
size=sim_size,
grid_size=3 * [1 / resolution],
structures=structures,
sources=[msource],
monitors=monitors,
run_time=20 * run_time_ps / fwidth,
pml_layers=3 * [td.PML()] if is_3d else [td.PML(), td.PML(), None],
)
if plot_modes:
src_plane = td.Box(center=source_center_offset, size=source_size)
ms = td.plugins.ModeSolver(simulation=sim, plane=src_plane, freq=freq0)
mode_spec = td.ModeSpec(num_modes=num_modes)
modes = ms.solve(mode_spec=mode_spec)
print(
"Effective index of computed modes: ",
", ".join([f"{mode.n_eff:1.4f}" for mode in modes]),
)
if is_3d:
fig, axs = plt.subplots(num_modes, 2, figsize=(12, 12))
else:
fig, axs = plt.subplots(num_modes, 3, figsize=(12, 12))
for mode_ind in range(num_modes):
            # in 3D the figure has only two columns (Ey, Ez), so set titles
            # per branch to avoid indexing a missing third axis
            if is_3d:
                abs(modes[mode_ind].field_data.Ey).plot(
                    x="y", y="z", cmap="magma", ax=axs[mode_ind, 0]
                )
                abs(modes[mode_ind].field_data.Ez).plot(
                    x="y", y="z", cmap="magma", ax=axs[mode_ind, 1]
                )
                axs[mode_ind, 0].set_title(f"|Ey|: mode_index={mode_ind}")
                axs[mode_ind, 1].set_title(f"|Ez|: mode_index={mode_ind}")
            else:
                abs(modes[mode_ind].field_data.Ex).plot(ax=axs[mode_ind, 0])
                abs(modes[mode_ind].field_data.Ey).plot(ax=axs[mode_ind, 1])
                abs(modes[mode_ind].field_data.Ez).plot(ax=axs[mode_ind, 2])
                axs[mode_ind, 0].set_title(f"|Ex|: mode_index={mode_ind}")
                axs[mode_ind, 1].set_title(f"|Ey|: mode_index={mode_ind}")
                axs[mode_ind, 2].set_title(f"|Ez|: mode_index={mode_ind}")
if is_3d:
axs[mode_ind, 0].set_aspect("equal")
axs[mode_ind, 1].set_aspect("equal")
plt.show()
return sim
def plot_simulation_yz(
sim: td.Simulation,
z: float = 0.0,
y: float = 0.0,
wavelength: Optional[float] = 1.55,
figsize: Float2 = (11, 4),
):
"""Returns Simulation visual representation.
returns two views for 3D component and one view for 2D
Args:
sim: simulation object
z: (um)
y: (um)
wavelength: (um) for epsilon plot if None plot structures.
figsize: figure size
"""
fig = plt.figure(figsize=figsize)
if sim.size[2] > 0.1 and sim.size[1] > 0.1:
gs = mpl.gridspec.GridSpec(1, 2, figure=fig, width_ratios=[1, 1.4])
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
if wavelength:
freq = td.constants.C_0 / wavelength
sim.plot_eps(z=z, ax=ax1, freq=freq)
sim.plot_eps(y=y, ax=ax2, freq=freq)
else:
sim.plot(z=z, ax=ax1)
sim.plot(y=y, ax=ax2)
elif sim.size[2] > 0.1: # 2D grating sim_size_y = 0
gs = mpl.gridspec.GridSpec(1, 1, figure=fig, width_ratios=[1])
ax1 = fig.add_subplot(gs[0, 0])
if wavelength:
freq = td.constants.C_0 / wavelength
sim.plot_eps(y=y, ax=ax1, freq=freq)
else:
sim.plot(y=y, ax=ax1)
else: # 2D planar component size_z = 0
gs = mpl.gridspec.GridSpec(1, 1, figure=fig, width_ratios=[1])
ax1 = fig.add_subplot(gs[0, 0])
if wavelength:
freq = td.constants.C_0 / wavelength
sim.plot_eps(z=z, ax=ax1, freq=freq)
else:
sim.plot(z=z, ax=ax1)
plt.show()
return fig
def plot_simulation_xz(
sim: td.Simulation,
x: float = 0.0,
z: float = 0.0,
wavelength: Optional[float] = 1.55,
figsize: Float2 = (11, 4),
):
"""Returns figure with two axis of the Simulation.
Args:
sim: simulation object
x: (um)
z: (um)
wavelength: (um) for epsilon plot if None plot structures.
figsize: figure size
"""
fig = plt.figure(figsize=figsize)
gs = mpl.gridspec.GridSpec(1, 2, figure=fig, width_ratios=[1, 1.4])
ax1 = fig.add_subplot(gs[0, 0])
ax2 = fig.add_subplot(gs[0, 1])
if wavelength:
freq = td.constants.C_0 / wavelength
sim.plot_eps(z=z, ax=ax1, freq=freq)
sim.plot_eps(x=x, ax=ax2, freq=freq)
else:
sim.plot(z=z, ax=ax1)
sim.plot(x=x, ax=ax2)
plt.show()
return fig
plot_simulation = plot_simulation_yz
if __name__ == "__main__":
# c = gf.components.mmi1x2()
# c = gf.components.bend_circular(radius=2)
# c = gf.components.crossing()
# c = gf.c.straight_rib()
c = gf.c.straight(length=3)
sim = get_simulation(c, plot_modes=False, is_3d=False)
plot_simulation(sim)
# filepath = pathlib.Path(__file__).parent / "extra" / "wg2d.json"
# filepath.write_text(sim.json())
# sim.plotly(z=0)
# plot_simulation_yz(sim, wavelength=1.55)
# fig = plt.figure(figsize=(11, 4))
# gs = mpl.gridspec.GridSpec(1, 2, figure=fig, width_ratios=[1, 1.4])
# ax1 = fig.add_subplot(gs[0, 0])
# ax2 = fig.add_subplot(gs[0, 1])
# sim.plot(z=0.0, ax=ax1)
# sim.plot(x=0.0, ax=ax2)
# plt.show()
|
|
import unittest
import mock
from ...management.actions import Actions
class TestActions(unittest.TestCase):
def test_init_with_optionals(self):
t = Actions(domain='domain', token='jwttoken', telemetry=False, timeout=(10, 2))
self.assertEqual(t.client.options.timeout, (10, 2))
telemetry_header = t.client.base_headers.get('Auth0-Client', None)
self.assertEqual(telemetry_header, None)
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_actions(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_actions()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions', args[0])
self.assertEqual(kwargs['params'], {'triggerId': None,
'actionName': None,
'deployed': 'false',
'installed': 'false',
'page': None,
'per_page': None})
c.get_actions('trigger-id', 'action-name', True, True, 0, 5)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions', args[0])
self.assertEqual(kwargs['params'], {'triggerId': 'trigger-id',
'actionName': 'action-name',
'deployed': 'true',
'installed': 'true',
'page': 0,
'per_page': 5})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_create_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.create_action({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/actions/actions',
data={'a': 'b', 'c': 'd'}
)
@mock.patch('auth0.v3.management.actions.RestClient')
def test_update_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.update_action('action-id', {'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_action('action-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id', args[0])
self.assertEqual(kwargs['params'], {})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_triggers(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_triggers()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/triggers', args[0])
self.assertEqual(kwargs['params'], {})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_delete_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.delete_action('action-id')
args, kwargs = mock_instance.delete.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id', args[0])
self.assertEqual(kwargs['params'], {'force': 'false'})
c.delete_action('action-id', True)
args, kwargs = mock_instance.delete.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id', args[0])
self.assertEqual(kwargs['params'], {'force': 'true'})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_execution(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_execution('execution-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/executions/execution-id', args[0])
self.assertEqual(kwargs['params'], {})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_action_versions(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_action_versions('action-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id/versions', args[0])
self.assertEqual(kwargs['params'], {'page': None,
'per_page': None})
c.get_action_versions('action-id', 0, 5)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id/versions', args[0])
self.assertEqual(kwargs['params'], {'page': 0,
'per_page': 5})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_trigger_bindings(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_trigger_bindings('trigger-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/triggers/trigger-id/bindings', args[0])
self.assertEqual(kwargs['params'], {'page': None,
'per_page': None})
c.get_trigger_bindings('trigger-id', 0, 5)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/triggers/trigger-id/bindings', args[0])
self.assertEqual(kwargs['params'], {'page': 0,
'per_page': 5})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_get_action_version(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.get_action_version('action-id', 'version-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id/versions/version-id', args[0])
self.assertEqual(kwargs['params'], {})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_deploy_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.deploy_action('action-id')
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id/deploy', args[0])
@mock.patch('auth0.v3.management.actions.RestClient')
def test_rollback_action(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.rollback_action_version('action-id', 'version-id')
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/actions/actions/action-id/versions/version-id/deploy', args[0])
self.assertEqual(kwargs['data'], {})
@mock.patch('auth0.v3.management.actions.RestClient')
def test_update_trigger_bindings(self, mock_rc):
mock_instance = mock_rc.return_value
c = Actions(domain='domain', token='jwttoken')
c.update_trigger_bindings('trigger-id', {'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/actions/triggers/trigger-id/bindings', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
|
|
import numpy as np
import fft
import data
class rosenfeld(object):
def __init__(self, system, sigma={}):
self.system = system
self.coeff = data.coeff(self.system, require='sigma')
# flood the coefficient dictionary with defaults
for t in self.system.types:
self.coeff.set(t, sigma=self.system.sigma[t])
self.weights = (0,1,2,3,'v1','v2')
def w(self, a, type, z):
assert a in self.weights
if not self.coeff.verify():
raise Exception('not all parameters are set!')
sigma = self.coeff.get(type, 'sigma')
if self._is_ideal(sigma):
return np.zeros_like(z)
R = 0.5*sigma
if a == 0:
return (0.5 / R) * np.ones_like(z)
elif a == 1:
return 0.5 * np.ones_like(z)
elif a == 2:
return 2.*np.pi*R * np.ones_like(z)
elif a == 3:
return np.pi*(R**2 - z*z)
elif a == 'v1':
# return as scalar because only nonzero entry is along ez
return 0.5*z/R
elif a == 'v2':
# return as scalar because only nonzero entry is along ez
return 2.*np.pi*z
def wk(self, a, type, k):
assert a in self.weights
if not self.coeff.verify():
raise Exception('not all parameters are set!')
sigma = self.coeff.get(type, 'sigma')
wk = np.zeros_like(k, dtype=np.complex_)
if self._is_ideal(sigma):
return wk
R = 0.5*sigma
omega = 2.0 * np.pi * k
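        # Analytic 3D Fourier transforms of the Rosenfeld weight functions,
        # evaluated at angular wavenumber w = 2*pi*k (the k -> 0 limits are
        # handled separately below):
        #   w0: sin(wR)/(wR)          w1: sin(wR)/w
        #   w2: 4*pi*R*sin(wR)/w      w3: 4*pi*(sin(wR) - wR*cos(wR))/w^3
        #   wv1: -i*(sin(wR) - wR*cos(wR))/(R*w^2)
        #   wv2: -i*4*pi*(sin(wR) - wR*cos(wR))/w^2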
flags = ~np.isclose(omega, 0.0)
if a == 0:
wk[flags] = np.sin(omega[flags] * R) / (omega[flags] * R)
wk[~flags] = 1.0
elif a == 1:
wk[flags] = np.sin(omega[flags] * R)/ omega[flags]
wk[~flags] = R
elif a == 2:
wk[flags] = 2.0 * R * np.sin(omega[flags] * R) / k[flags]
wk[~flags] = 4.0 * np.pi * R**2
elif a == 3:
wk[flags] = (2.0/k[flags]) * (np.sin(omega[flags] * R) - omega[flags] * R * np.cos(omega[flags] * R))/omega[flags]**2
wk[~flags] = (4.0*np.pi*R**3)/3.0
elif a == 'v1':
wk[flags] = -1.j * (np.sin(omega[flags] * R) - omega[flags] * R * np.cos(omega[flags] * R))/(R * omega[flags]**2)
wk[~flags] = 0.0
elif a == 'v2':
wk[flags] = -1.j * (4.0 * np.pi) * (np.sin(omega[flags] * R) - omega[flags] * R * np.cos(omega[flags] * R))/omega[flags]**2
wk[~flags] = 0.0
return wk
def n(self, a, densities, bulk=False):
assert a in self.weights
if not self.coeff.verify():
raise Exception('not all parameters are set!')
if bulk:
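            # in bulk the convolutions reduce to multiplication by the k=0
            # limit of each weight: 1, R, 4*pi*R^2, 4*pi*R^3/3 for a = 0..3
            # and 0 for the vector weights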
n = np.zeros_like(densities[self.system.types[0]])
for t in self.system.types:
sigma = self.coeff.get(t,'sigma')
if self._is_ideal(sigma):
continue
if a == 0:
n += densities[t]
elif a == 1:
n += densities[t] * 0.5*sigma
elif a == 2:
n += 4.0 * np.pi * (0.5*sigma)**2 * densities[t]
elif a == 3:
n += (4.0 * np.pi / 3.0) * (0.5*sigma)**3 * densities[t]
elif a == 'v1' or a == 'v2':
n += 0.0
else:
n = np.zeros_like(self.system.mesh)
k_mesh = np.fft.fftfreq(len(self.system.mesh), self.system.dz)
for t in self.system.types:
sigma = self.coeff.get(t,'sigma')
if self._is_ideal(sigma):
continue
rho = np.array(densities[t])
n += np.real(np.fft.ifft(np.fft.fft(rho) * self.wk(a,t,k_mesh)))
return n
def _is_ideal(self,sigma):
"""Check if particle diameter signals ideal"""
return sigma is None or not sigma > 0.0 or sigma is False
def f1(self, n3):
return -np.log(1.-n3)
def df1(self,n3):
return 1./(1.-n3)
def f2(self, n3):
return 1./(1.-n3)
def df2(self, n3):
return 1./(1.-n3)**2
def f4(self, n3):
return 1./(24.*np.pi*(1.-n3)**2)
def df4(self, n3):
return 1./(12.*np.pi*(1.-n3)**3)
def phi(self, densities):
# precompute the weights
n0 = self.n(0, densities)
n1 = self.n(1, densities)
n2 = self.n(2, densities)
n3 = self.n(3, densities)
nv1 = self.n('v1', densities)
nv2 = self.n('v2', densities)
if np.any(n3 > 1.0):
raise Exception('n3 > 1.0, solution may be diverging!')
return self.f1(n3)*n0 + self.f2(n3)*(n1*n2 - nv1*nv2) + self.f4(n3)*(n2**3 - 3.*n2*nv2**2)
def dphi(self, densities,bulk=False):
# precompute the weights
n0 = self.n(0, densities, bulk)
n1 = self.n(1, densities, bulk)
n2 = self.n(2, densities, bulk)
n3 = self.n(3, densities, bulk)
nv1 = self.n('v1', densities, bulk)
nv2 = self.n('v2', densities, bulk)
if np.any(n3 > 1.0):
raise Exception('n3 > 1.0, solution may be diverging!')
# precompute the partials
dphi_dn = {}
dphi_dn[0] = self.f1(n3)
dphi_dn[1] = self.f2(n3)*n2
dphi_dn[2] = self.f2(n3)*n1 + 3.*self.f4(n3)*(n2*n2 - nv2*nv2)
dphi_dn[3] = self.df1(n3)*n0 + self.df2(n3)*(n1*n2-nv1*nv2) + self.df4(n3)*(n2**3-3.*n2*nv2**2)
dphi_dn['v1'] = -self.f2(n3)*nv2
dphi_dn['v2'] = -self.f2(n3)*nv1 - 6.*self.f4(n3)*n2*nv2
return dphi_dn
def F_ex(self, densities):
phi = self.phi(densities)
return self.system.dz * np.sum(phi)
def mu_ex(self, densities):
dphi_dn = self.dphi(densities)
if not self.coeff.verify():
raise Exception('not all parameters are set!')
# initialize for summation
mu_ex = {}
for t in self.system.types:
mu_ex[t] = np.zeros_like(self.system.mesh)
k_mesh = np.fft.fftfreq(len(self.system.mesh), self.system.dz)
sigma = self.coeff.get(t, 'sigma')
if self._is_ideal(sigma):
continue
z = np.arange(-0.5*sigma,0.5*sigma+self.system.dz, self.system.dz)
for a in (0,1,2,3,'v1','v2'):
sign = 1.0
if a == 'v1' or a == 'v2':
sign = -1.0
mu_ex[t] += np.real(np.fft.ifft(np.fft.fft(dphi_dn[a]) * sign * self.wk(a,t,k_mesh)))
return mu_ex
class whitebear(rosenfeld):
def __init__(self, system, sigma={}):
rosenfeld.__init__(self,system,sigma)
def f4(self, n3):
try:
all_f4 = np.zeros(len(n3))
n3_arr = np.array(n3)
# flag entries of n3 that are big enough to use the actual formula
flags = (n3_arr > whitebear._f4_threshold)
# apply real formula to the "big" ones
n3_big = n3_arr[flags]
if len(n3_big) > 0:
all_f4[flags] = (n3_big+(1.-n3_big)**2*np.log(1.-n3_big))/(36.*np.pi*n3_big**2*(1.-n3_big)**2)
# use the taylor series for the "small" ones
# this seems very accurate over the 1.e-2 range in Mathematica
if len(n3_big) < len(n3_arr):
n3_small = n3_arr[~flags]
all_f4[~flags] = 1./(24.*np.pi) + 2.*n3_small/(27.*np.pi) + 5.*n3_small**2/(48.*np.pi)
return all_f4
except TypeError:
# catch single values rather than arrays and just do scalar arithmetic
if n3 > whitebear._f4_threshold:
return (n3+(1.-n3)**2*np.log(1.-n3))/(36.*np.pi*n3**2*(1.-n3)**2)
else:
return 1./(24.*np.pi) + 2.*n3/(27.*np.pi) + 5.*n3**2/(48.*np.pi)
def df4(self, n3):
try:
all_df4 = np.zeros(len(n3))
n3_arr = np.array(n3)
# flag entries of n3 that are big enough to use the actual formula
flags = n3_arr > whitebear._f4_threshold
# apply real formula to the "big" ones
n3_big = n3_arr[flags]
if len(n3_big) > 0:
all_df4[flags] = -(2.-5.*n3_big+n3_big**2)/(36*np.pi*(1.-n3_big)**3*n3_big**2)-np.log(1.-n3_big)/(18.*np.pi*n3_big**3)
# use the taylor series for the "small" ones
# this seems very accurate over the 1.e-2 range in Mathematica
if len(n3_big) < len(n3_arr):
n3_small = n3_arr[~flags]
all_df4[~flags] = 2./(27.*np.pi) + 5.*n3_small/(24.*np.pi) + 2.*n3_small**2/(5.*np.pi)
return all_df4
except TypeError:
# catch single values rather than arrays and just do scalar arithmetic
if n3 > whitebear._f4_threshold:
return -(2.-5.*n3+n3**2)/(36*np.pi*(1.-n3)**3*n3**2)-np.log(1.-n3)/(18.*np.pi*n3**3)
else:
return 2./(27.*np.pi) + 5.*n3/(24.*np.pi) + 2.*n3**2/(5.*np.pi)
whitebear._f4_threshold = 1.e-2
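# Minimal numerical sanity check (added sketch, not part of the original
# module; assumes the module's own imports, fft and data, resolve): verifies
# that the small-n3 Taylor expansions used in whitebear.f4 agree with the
# exact expression near the 1e-2 threshold.
if __name__ == '__main__':
    _n3 = np.array([5.e-3, 1.e-2, 2.e-2])
    _exact_f4 = (_n3 + (1. - _n3)**2 * np.log(1. - _n3)) / \
                (36. * np.pi * _n3**2 * (1. - _n3)**2)
    _taylor_f4 = 1./(24.*np.pi) + 2.*_n3/(27.*np.pi) + 5.*_n3**2/(48.*np.pi)
    print('max |exact - taylor| for f4:', np.max(np.abs(_exact_f4 - _taylor_f4)))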
|
|
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 VMware, Inc.
# Copyright (c) 2011 Citrix Systems, Inc.
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
A fake VMware VI API implementation.
"""
import collections
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import units
from oslo_utils import uuidutils
from oslo_vmware import exceptions as vexc
from oslo_vmware.objects import datastore as ds_obj
from nova import exception
from nova.virt.vmwareapi import constants
_CLASSES = ['Datacenter', 'Datastore', 'ResourcePool', 'VirtualMachine',
'Network', 'HostSystem', 'HostNetworkSystem', 'Task', 'session',
'files', 'ClusterComputeResource', 'HostStorageSystem']
_FAKE_FILE_SIZE = 1024
_FAKE_VCENTER_UUID = '497c514c-ef5e-4e7f-8d93-ec921993b93a'
_db_content = {}
_array_types = {}
_vim_map = {}
LOG = logging.getLogger(__name__)
def reset():
"""Resets the db contents."""
cleanup()
create_network()
create_host_network_system()
create_host_storage_system()
ds_ref1 = create_datastore('ds1', 1024, 500)
create_host(ds_ref=ds_ref1)
ds_ref2 = create_datastore('ds2', 1024, 500)
create_host(ds_ref=ds_ref2)
create_datacenter('dc1', ds_ref1)
create_datacenter('dc2', ds_ref2)
create_res_pool()
create_cluster('test_cluster', ds_ref1)
create_cluster('test_cluster2', ds_ref2)
def cleanup():
"""Clear the db contents."""
for c in _CLASSES:
# We fake the datastore by keeping the file references as a list of
# names in the db
if c == 'files':
_db_content[c] = []
else:
_db_content[c] = {}
def _create_object(table, table_obj):
"""Create an object in the db."""
_db_content[table][table_obj.obj] = table_obj
def _get_object(obj_ref):
"""Get object for the give reference."""
return _db_content[obj_ref.type][obj_ref]
def _get_objects(obj_type):
"""Get objects of the type."""
lst_objs = FakeRetrieveResult()
for key in _db_content[obj_type]:
lst_objs.add_object(_db_content[obj_type][key])
return lst_objs
def _convert_to_array_of_mor(mors):
"""Wraps the given array into a DataObject."""
array_of_mors = DataObject()
array_of_mors.ManagedObjectReference = mors
return array_of_mors
def _convert_to_array_of_opt_val(optvals):
"""Wraps the given array into a DataObject."""
array_of_optv = DataObject()
array_of_optv.OptionValue = optvals
return array_of_optv
def _create_array_of_type(t):
"""Returns an array to contain objects of type t."""
if t in _array_types:
return _array_types[t]()
array_type_name = 'ArrayOf%s' % t
array_type = type(array_type_name, (DataObject,), {})
def __init__(self):
super(array_type, self).__init__(array_type_name)
setattr(self, t, [])
setattr(array_type, '__init__', __init__)
_array_types[t] = array_type
return array_type()
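# Hypothetical usage sketch (not part of the original module): the generated
# wrapper mirrors the SOAP ArrayOf* types, e.g.
#   arr = _create_array_of_type('OptionValue')
#   arr.OptionValue.append(OptionValue(key='k', value='v'))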
class FakeRetrieveResult(object):
"""Object to retrieve a ObjectContent list."""
def __init__(self, token=None):
self.objects = []
if token is not None:
self.token = token
def add_object(self, object):
self.objects.append(object)
class MissingProperty(object):
"""Missing object in ObjectContent's missing set."""
def __init__(self, path='fake-path', message='fake_message',
method_fault=None):
self.path = path
self.fault = DataObject()
self.fault.localizedMessage = message
self.fault.fault = method_fault
def _get_object_refs(obj_type):
"""Get object References of the type."""
lst_objs = []
for key in _db_content[obj_type]:
lst_objs.append(key)
return lst_objs
def _update_object(table, table_obj):
"""Update objects of the type."""
_db_content[table][table_obj.obj] = table_obj
class Prop(object):
"""Property Object base class."""
def __init__(self, name=None, val=None):
self.name = name
self.val = val
class ManagedObjectReference(object):
"""A managed object reference is a remote identifier."""
def __init__(self, name="ManagedObject", value=None):
        super(ManagedObjectReference, self).__init__()
# Managed Object Reference value attributes
# typically have values like vm-123 or
# host-232 and not UUID.
self.value = value
# Managed Object Reference type
# attributes hold the name of the type
# of the vCenter object the value
# attribute is the identifier for
self.type = name
self._type = name
class ObjectContent(object):
"""ObjectContent array holds dynamic properties."""
# This class is a *fake* of a class sent back to us by
# SOAP. It has its own names. These names are decided
# for us by the API we are *faking* here.
def __init__(self, obj_ref, prop_list=None, missing_list=None):
self.obj = obj_ref
if not isinstance(prop_list, collections.Iterable):
prop_list = []
if not isinstance(missing_list, collections.Iterable):
missing_list = []
# propSet is the name your Python code will need to
# use since this is the name that the API will use
if prop_list:
self.propSet = prop_list
# missingSet is the name your python code will
# need to use since this is the name that the
# API we are talking to will use.
if missing_list:
self.missingSet = missing_list
class ManagedObject(object):
"""Managed Object base class."""
_counter = 0
def __init__(self, mo_id_prefix="obj"):
"""Sets the obj property which acts as a reference to the object."""
object.__setattr__(self, 'mo_id', self._generate_moid(mo_id_prefix))
object.__setattr__(self, 'propSet', [])
object.__setattr__(self, 'obj',
ManagedObjectReference(self.__class__.__name__,
self.mo_id))
def set(self, attr, val):
"""Sets an attribute value. Not using the __setattr__ directly for we
want to set attributes of the type 'a.b.c' and using this function
class we set the same.
"""
self.__setattr__(attr, val)
def get(self, attr):
"""Gets an attribute. Used as an intermediary to get nested
property like 'a.b.c' value.
"""
return self.__getattr__(attr)
    def delete(self, attr):
        """Deletes an attribute."""
        # bypass the overridden __setattr__ and store a real list so the
        # property is actually removed (filter() would also be lazy on py3)
        object.__setattr__(self, 'propSet',
                           [elem for elem in self.propSet if elem.name != attr])
def __setattr__(self, attr, val):
        # TODO(hartsocks): this adds unnecessary complexity to the class
for prop in self.propSet:
if prop.name == attr:
prop.val = val
return
elem = Prop()
elem.name = attr
elem.val = val
self.propSet.append(elem)
def __getattr__(self, attr):
# TODO(hartsocks): remove this
# in a real ManagedObject you have to iterate the propSet
# in a real ManagedObject, the propSet is a *set* not a list
for elem in self.propSet:
if elem.name == attr:
return elem.val
msg = "Property %(attr)s not set for the managed object %(name)s"
raise exception.NovaException(msg % {'attr': attr,
'name': self.__class__.__name__})
def _generate_moid(self, prefix):
"""Generates a new Managed Object ID."""
self.__class__._counter += 1
return prefix + "-" + str(self.__class__._counter)
def __repr__(self):
return jsonutils.dumps({elem.name: elem.val
for elem in self.propSet})
class DataObject(object):
"""Data object base class."""
def __init__(self, obj_name=None):
self.obj_name = obj_name
def __repr__(self):
return str(self.__dict__)
def __eq__(self, other):
return self.__dict__ == other.__dict__
class HostInternetScsiHba(DataObject):
"""iSCSI Host Bus Adapter."""
def __init__(self):
super(HostInternetScsiHba, self).__init__()
self.device = 'vmhba33'
self.key = 'key-vmhba33'
class FileAlreadyExists(DataObject):
"""File already exists class."""
def __init__(self):
super(FileAlreadyExists, self).__init__()
self.__name__ = vexc.FILE_ALREADY_EXISTS
class FileNotFound(DataObject):
"""File not found class."""
def __init__(self):
super(FileNotFound, self).__init__()
self.__name__ = vexc.FILE_NOT_FOUND
class FileFault(DataObject):
"""File fault."""
def __init__(self):
super(FileFault, self).__init__()
self.__name__ = vexc.FILE_FAULT
class CannotDeleteFile(DataObject):
"""Cannot delete file."""
def __init__(self):
super(CannotDeleteFile, self).__init__()
self.__name__ = vexc.CANNOT_DELETE_FILE
class FileLocked(DataObject):
"""File locked."""
def __init__(self):
super(FileLocked, self).__init__()
self.__name__ = vexc.FILE_LOCKED
class VirtualDisk(DataObject):
"""Virtual Disk class."""
def __init__(self, controllerKey=0, unitNumber=0):
super(VirtualDisk, self).__init__()
self.key = 0
self.controllerKey = controllerKey
self.unitNumber = unitNumber
class VirtualDiskFlatVer2BackingInfo(DataObject):
"""VirtualDiskFlatVer2BackingInfo class."""
def __init__(self):
super(VirtualDiskFlatVer2BackingInfo, self).__init__()
self.thinProvisioned = False
self.eagerlyScrub = False
class VirtualDiskRawDiskMappingVer1BackingInfo(DataObject):
"""VirtualDiskRawDiskMappingVer1BackingInfo class."""
def __init__(self):
super(VirtualDiskRawDiskMappingVer1BackingInfo, self).__init__()
self.lunUuid = ""
class VirtualIDEController(DataObject):
def __init__(self, key=0):
self.key = key
class VirtualLsiLogicController(DataObject):
"""VirtualLsiLogicController class."""
def __init__(self, key=0, scsiCtlrUnitNumber=0):
self.key = key
self.scsiCtlrUnitNumber = scsiCtlrUnitNumber
self.device = []
class VirtualLsiLogicSASController(DataObject):
"""VirtualLsiLogicSASController class."""
pass
class VirtualPCNet32(DataObject):
"""VirtualPCNet32 class."""
def __init__(self):
super(VirtualPCNet32, self).__init__()
self.key = 4000
class OptionValue(DataObject):
"""OptionValue class."""
def __init__(self, key=None, value=None):
super(OptionValue, self).__init__()
self.key = key
self.value = value
class VirtualMachine(ManagedObject):
"""Virtual Machine class."""
def __init__(self, **kwargs):
super(VirtualMachine, self).__init__("vm")
self.set("name", kwargs.get("name", 'test-vm'))
self.set("runtime.connectionState",
kwargs.get("conn_state", "connected"))
self.set("summary.config.guestId",
kwargs.get("guest", constants.DEFAULT_OS_TYPE))
ds_do = kwargs.get("ds", None)
self.set("datastore", _convert_to_array_of_mor(ds_do))
self.set("summary.guest.toolsStatus", kwargs.get("toolsstatus",
"toolsOk"))
self.set("summary.guest.toolsRunningStatus", kwargs.get(
"toolsrunningstate", "guestToolsRunning"))
self.set("runtime.powerState", kwargs.get("powerstate", "poweredOn"))
self.set("config.files.vmPathName", kwargs.get("vmPathName"))
self.set("summary.config.numCpu", kwargs.get("numCpu", 1))
self.set("summary.config.memorySizeMB", kwargs.get("mem", 1))
self.set("summary.config.instanceUuid", kwargs.get("instanceUuid"))
self.set("version", kwargs.get("version"))
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = kwargs.get("virtual_device", [])
self.set("config.hardware.device", devices)
exconfig_do = kwargs.get("extra_config", None)
self.set("config.extraConfig",
_convert_to_array_of_opt_val(exconfig_do))
if exconfig_do:
for optval in exconfig_do:
self.set('config.extraConfig["%s"]' % optval.key, optval)
self.set('runtime.host', kwargs.get("runtime_host", None))
self.device = kwargs.get("virtual_device", [])
# Sample of diagnostics data is below.
config = [
('template', False),
('vmPathName', 'fake_path'),
('memorySizeMB', 512),
('cpuReservation', 0),
('memoryReservation', 0),
('numCpu', 1),
('numEthernetCards', 1),
('numVirtualDisks', 1)]
self.set("summary.config", config)
quickStats = [
('overallCpuUsage', 0),
('overallCpuDemand', 0),
('guestMemoryUsage', 0),
('hostMemoryUsage', 141),
('balloonedMemory', 0),
('consumedOverheadMemory', 20)]
self.set("summary.quickStats", quickStats)
key1 = {'key': 'cpuid.AES'}
key2 = {'key': 'cpuid.AVX'}
runtime = [
('connectionState', 'connected'),
('powerState', 'poweredOn'),
('toolsInstallerMounted', False),
('suspendInterval', 0),
('memoryOverhead', 21417984),
('maxCpuUsage', 2000),
('featureRequirement', [key1, key2])]
self.set("summary.runtime", runtime)
def _update_extra_config(self, extra):
extra_config = self.get("config.extraConfig")
values = extra_config.OptionValue
for value in values:
if value.key == extra.key:
value.value = extra.value
return
kv = DataObject()
kv.key = extra.key
kv.value = extra.value
extra_config.OptionValue.append(kv)
self.set("config.extraConfig", extra_config)
extra_config = self.get("config.extraConfig")
def reconfig(self, factory, val):
"""Called to reconfigure the VM. Actually customizes the property
setting of the Virtual Machine object.
"""
if hasattr(val, 'name') and val.name:
self.set("name", val.name)
if hasattr(val, 'extraConfig'):
extraConfigs = _merge_extraconfig(
self.get("config.extraConfig").OptionValue,
val.extraConfig)
self.get("config.extraConfig").OptionValue = extraConfigs
if hasattr(val, 'instanceUuid') and val.instanceUuid is not None:
if val.instanceUuid == "":
val.instanceUuid = uuidutils.generate_uuid()
self.set("summary.config.instanceUuid", val.instanceUuid)
try:
if not hasattr(val, 'deviceChange'):
return
if hasattr(val, 'extraConfig'):
# there are 2 cases - new entry or update an existing one
for extra in val.extraConfig:
self._update_extra_config(extra)
if len(val.deviceChange) < 2:
return
# Case of Reconfig of VM to attach disk
controller_key = val.deviceChange[0].device.controllerKey
filename = val.deviceChange[0].device.backing.fileName
disk = VirtualDisk()
disk.controllerKey = controller_key
disk_backing = VirtualDiskFlatVer2BackingInfo()
disk_backing.fileName = filename
disk_backing.key = -101
disk.backing = disk_backing
disk.capacityInBytes = 1024
disk.capacityInKB = 1
controller = VirtualLsiLogicController()
controller.key = controller_key
devices = _create_array_of_type('VirtualDevice')
devices.VirtualDevice = [disk, controller, self.device[0]]
self.set("config.hardware.device", devices)
except AttributeError:
pass
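# Note on reconfig() above: it only models what the tests exercise -- renaming,
# regenerating an empty instanceUuid, merging extraConfig entries, and the
# "attach disk" case, where the first deviceChange entry is rebuilt as a
# VirtualDisk plus a matching VirtualLsiLogicController and stored on
# config.hardware.device next to the VM's original first device.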
class Network(ManagedObject):
"""Network class."""
def __init__(self):
super(Network, self).__init__("network")
self.set("summary.name", "vmnet0")
class ResourcePool(ManagedObject):
"""Resource Pool class."""
def __init__(self, name="test_ResPool", value="resgroup-test"):
super(ResourcePool, self).__init__("rp")
self.set("name", name)
summary = DataObject()
runtime = DataObject()
config = DataObject()
memory = DataObject()
cpu = DataObject()
memoryAllocation = DataObject()
cpuAllocation = DataObject()
vm_list = DataObject()
memory.maxUsage = 1000 * units.Mi
memory.overallUsage = 500 * units.Mi
cpu.maxUsage = 10000
cpu.overallUsage = 1000
runtime.cpu = cpu
runtime.memory = memory
summary.runtime = runtime
cpuAllocation.limit = 10000
memoryAllocation.limit = 1024
memoryAllocation.reservation = 1024
config.memoryAllocation = memoryAllocation
config.cpuAllocation = cpuAllocation
vm_list.ManagedObjectReference = []
self.set("summary", summary)
self.set("summary.runtime.memory", memory)
self.set("config", config)
self.set("vm", vm_list)
parent = ManagedObjectReference(value=value,
name=name)
owner = ManagedObjectReference(value=value,
name=name)
self.set("parent", parent)
self.set("owner", owner)
class DatastoreHostMount(DataObject):
def __init__(self, value='host-100'):
super(DatastoreHostMount, self).__init__()
host_ref = (_db_content["HostSystem"]
[_db_content["HostSystem"].keys()[0]].obj)
host_system = DataObject()
host_system.ManagedObjectReference = [host_ref]
host_system.value = value
self.key = host_system
class ClusterComputeResource(ManagedObject):
"""Cluster class."""
def __init__(self, name="test_cluster"):
super(ClusterComputeResource, self).__init__("domain")
self.set("name", name)
self.set("host", None)
self.set("datastore", None)
self.set("resourcePool", None)
summary = DataObject()
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
summary.effectiveCpu = 10000
self.set("summary", summary)
def _add_root_resource_pool(self, r_pool):
if r_pool:
self.set("resourcePool", r_pool)
def _add_host(self, host_sys):
if host_sys:
hosts = self.get("host")
if hosts is None:
hosts = DataObject()
hosts.ManagedObjectReference = []
self.set("host", hosts)
hosts.ManagedObjectReference.append(host_sys)
# Update summary every time a new host is added
self._update_summary()
def _add_datastore(self, datastore):
if datastore:
datastores = self.get("datastore")
if datastores is None:
datastores = DataObject()
datastores.ManagedObjectReference = []
self.set("datastore", datastores)
datastores.ManagedObjectReference.append(datastore)
# Method to update summary of a cluster upon host addition
def _update_summary(self):
summary = self.get("summary")
summary.numHosts = 0
summary.numCpuCores = 0
summary.numCpuThreads = 0
summary.numEffectiveHosts = 0
summary.totalMemory = 0
summary.effectiveMemory = 0
hosts = self.get("host")
# Compute the aggregate stats
summary.numHosts = len(hosts.ManagedObjectReference)
for host_ref in hosts.ManagedObjectReference:
host_sys = _get_object(host_ref)
connected = host_sys.get("connected")
host_summary = host_sys.get("summary")
summary.numCpuCores += host_summary.hardware.numCpuCores
summary.numCpuThreads += host_summary.hardware.numCpuThreads
summary.totalMemory += host_summary.hardware.memorySize
free_memory = (host_summary.hardware.memorySize / units.Mi
- host_summary.quickStats.overallMemoryUsage)
summary.effectiveMemory += free_memory if connected else 0
summary.numEffectiveHosts += 1 if connected else 0
self.set("summary", summary)
class Datastore(ManagedObject):
"""Datastore class."""
def __init__(self, name="fake-ds", capacity=1024, free=500,
accessible=True, maintenance_mode="normal"):
super(Datastore, self).__init__("ds")
self.set("summary.type", "VMFS")
self.set("summary.name", name)
self.set("summary.capacity", capacity * units.Gi)
self.set("summary.freeSpace", free * units.Gi)
self.set("summary.accessible", accessible)
self.set("summary.maintenanceMode", maintenance_mode)
self.set("browser", "")
class HostNetworkSystem(ManagedObject):
"""HostNetworkSystem class."""
def __init__(self, name="networkSystem"):
super(HostNetworkSystem, self).__init__("ns")
self.set("name", name)
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("networkInfo.pnic", net_info_pnic)
class HostStorageSystem(ManagedObject):
"""HostStorageSystem class."""
def __init__(self):
super(HostStorageSystem, self).__init__("storageSystem")
class HostSystem(ManagedObject):
"""Host System class."""
def __init__(self, name="ha-host", connected=True, ds_ref=None,
maintenance_mode=False):
super(HostSystem, self).__init__("host")
self.set("name", name)
if _db_content.get("HostNetworkSystem", None) is None:
create_host_network_system()
if not _get_object_refs('HostStorageSystem'):
create_host_storage_system()
host_net_key = _db_content["HostNetworkSystem"].keys()[0]
host_net_sys = _db_content["HostNetworkSystem"][host_net_key].obj
self.set("configManager.networkSystem", host_net_sys)
host_storage_sys_key = _get_object_refs('HostStorageSystem')[0]
self.set("configManager.storageSystem", host_storage_sys_key)
if not ds_ref:
ds_ref = create_datastore('local-host-%s' % name, 500, 500)
datastores = DataObject()
datastores.ManagedObjectReference = [ds_ref]
self.set("datastore", datastores)
summary = DataObject()
hardware = DataObject()
hardware.numCpuCores = 8
hardware.numCpuPkgs = 2
hardware.numCpuThreads = 16
hardware.vendor = "Intel"
hardware.cpuModel = "Intel(R) Xeon(R)"
hardware.uuid = "host-uuid"
hardware.memorySize = units.Gi
summary.hardware = hardware
runtime = DataObject()
if connected:
runtime.connectionState = "connected"
else:
runtime.connectionState = "disconnected"
runtime.inMaintenanceMode = maintenance_mode
summary.runtime = runtime
quickstats = DataObject()
quickstats.overallMemoryUsage = 500
summary.quickStats = quickstats
product = DataObject()
product.name = "VMware ESXi"
product.version = "5.0.0"
config = DataObject()
config.product = product
summary.config = config
pnic_do = DataObject()
pnic_do.device = "vmnic0"
net_info_pnic = DataObject()
net_info_pnic.PhysicalNic = [pnic_do]
self.set("summary", summary)
self.set("capability.maxHostSupportedVcpus", 600)
self.set("summary.hardware", hardware)
self.set("summary.runtime", runtime)
self.set("config.network.pnic", net_info_pnic)
self.set("connected", connected)
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = "vSwitch0"
vswitch_do.portgroup = ["PortGroup-vmnet0"]
        net_switch = DataObject()
        net_switch.HostVirtualSwitch = [vswitch_do]
        self.set("config.network.vswitch", net_switch)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-vmnet0"
pg_spec = DataObject()
pg_spec.vlanId = 0
pg_spec.name = "vmnet0"
host_pg_do.spec = pg_spec
host_pg = DataObject()
host_pg.HostPortGroup = [host_pg_do]
self.set("config.network.portgroup", host_pg)
config = DataObject()
storageDevice = DataObject()
iscsi_hba = HostInternetScsiHba()
iscsi_hba.iScsiName = "iscsi-name"
host_bus_adapter_array = DataObject()
host_bus_adapter_array.HostHostBusAdapter = [iscsi_hba]
storageDevice.hostBusAdapter = host_bus_adapter_array
config.storageDevice = storageDevice
self.set("config.storageDevice.hostBusAdapter", host_bus_adapter_array)
# Set the same on the storage system managed object
host_storage_sys = _get_object(host_storage_sys_key)
host_storage_sys.set('storageDeviceInfo.hostBusAdapter',
host_bus_adapter_array)
def _add_iscsi_target(self, data):
default_lun = DataObject()
default_lun.scsiLun = 'key-vim.host.ScsiDisk-010'
default_lun.key = 'key-vim.host.ScsiDisk-010'
default_lun.deviceName = 'fake-device'
default_lun.uuid = 'fake-uuid'
scsi_lun_array = DataObject()
scsi_lun_array.ScsiLun = [default_lun]
self.set("config.storageDevice.scsiLun", scsi_lun_array)
transport = DataObject()
transport.address = [data['target_portal']]
transport.iScsiName = data['target_iqn']
default_target = DataObject()
default_target.lun = [default_lun]
default_target.transport = transport
iscsi_adapter = DataObject()
iscsi_adapter.adapter = 'key-vmhba33'
iscsi_adapter.transport = transport
iscsi_adapter.target = [default_target]
iscsi_topology = DataObject()
iscsi_topology.adapter = [iscsi_adapter]
self.set("config.storageDevice.scsiTopology", iscsi_topology)
def _add_port_group(self, spec):
"""Adds a port group to the host system object in the db."""
pg_name = spec.name
vswitch_name = spec.vswitchName
vlanid = spec.vlanId
vswitch_do = DataObject()
vswitch_do.pnic = ["vmnic0"]
vswitch_do.name = vswitch_name
vswitch_do.portgroup = ["PortGroup-%s" % pg_name]
vswitches = self.get("config.network.vswitch").HostVirtualSwitch
vswitches.append(vswitch_do)
host_pg_do = DataObject()
host_pg_do.key = "PortGroup-%s" % pg_name
pg_spec = DataObject()
pg_spec.vlanId = vlanid
pg_spec.name = pg_name
host_pg_do.spec = pg_spec
host_pgrps = self.get("config.network.portgroup").HostPortGroup
host_pgrps.append(host_pg_do)
class Datacenter(ManagedObject):
"""Datacenter class."""
def __init__(self, name="ha-datacenter", ds_ref=None):
super(Datacenter, self).__init__("dc")
self.set("name", name)
self.set("vmFolder", "vm_folder_ref")
if _db_content.get("Network", None) is None:
create_network()
net_ref = _db_content["Network"][_db_content["Network"].keys()[0]].obj
network_do = DataObject()
network_do.ManagedObjectReference = [net_ref]
self.set("network", network_do)
if ds_ref:
datastore = DataObject()
datastore.ManagedObjectReference = [ds_ref]
else:
datastore = None
self.set("datastore", datastore)
class Task(ManagedObject):
"""Task class."""
def __init__(self, task_name, state="running", result=None,
error_fault=None):
super(Task, self).__init__("Task")
info = DataObject()
info.name = task_name
info.state = state
if state == 'error':
error = DataObject()
error.localizedMessage = "Error message"
if not error_fault:
error.fault = DataObject()
else:
error.fault = error_fault
info.error = error
info.result = result
self.set("info", info)
def create_host_network_system():
host_net_system = HostNetworkSystem()
_create_object("HostNetworkSystem", host_net_system)
def create_host_storage_system():
host_storage_system = HostStorageSystem()
_create_object("HostStorageSystem", host_storage_system)
def create_host(ds_ref=None):
host_system = HostSystem(ds_ref=ds_ref)
_create_object('HostSystem', host_system)
def create_datacenter(name, ds_ref=None):
data_center = Datacenter(name, ds_ref)
_create_object('Datacenter', data_center)
def create_datastore(name, capacity, free):
data_store = Datastore(name, capacity, free)
_create_object('Datastore', data_store)
return data_store.obj
def create_res_pool():
res_pool = ResourcePool()
_create_object('ResourcePool', res_pool)
return res_pool.obj
def create_network():
network = Network()
_create_object('Network', network)
def create_cluster(name, ds_ref):
cluster = ClusterComputeResource(name=name)
cluster._add_host(_get_object_refs("HostSystem")[0])
cluster._add_host(_get_object_refs("HostSystem")[1])
cluster._add_datastore(ds_ref)
cluster._add_root_resource_pool(create_res_pool())
_create_object('ClusterComputeResource', cluster)
return cluster
def create_vm(uuid=None, name=None,
cpus=1, memory=128, devices=None,
vmPathName=None, extraConfig=None,
res_pool_ref=None, host_ref=None,
version=None):
if uuid is None:
uuid = uuidutils.generate_uuid()
if name is None:
name = uuid
if devices is None:
devices = []
if vmPathName is None:
vm_path = ds_obj.DatastorePath(_db_content['Datastore'].values()[0])
else:
vm_path = ds_obj.DatastorePath.parse(vmPathName)
if res_pool_ref is None:
res_pool_ref = _db_content['ResourcePool'].keys()[0]
if host_ref is None:
host_ref = _db_content["HostSystem"].keys()[0]
# Fill in the default path to the vmx file if we were only given a
# datastore. Note that if you create a VM with vmPathName '[foo]', when you
# retrieve vmPathName it will be '[foo] uuid/uuid.vmx'. Hence we use
# vm_path below for the stored value of vmPathName.
if vm_path.rel_path == '':
vm_path = vm_path.join(name, name + '.vmx')
for key, value in _db_content["Datastore"].iteritems():
if value.get('summary.name') == vm_path.datastore:
ds = key
break
else:
ds = create_datastore(vm_path.datastore, 1024, 500)
vm_dict = {"name": name,
"ds": [ds],
"runtime_host": host_ref,
"powerstate": "poweredOff",
"vmPathName": str(vm_path),
"numCpu": cpus,
"mem": memory,
"extra_config": extraConfig,
"virtual_device": devices,
"instanceUuid": uuid,
"version": version}
vm = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", vm)
res_pool = _get_object(res_pool_ref)
res_pool.vm.ManagedObjectReference.append(vm.obj)
return vm.obj
def create_task(task_name, state="running", result=None, error_fault=None):
task = Task(task_name, state, result, error_fault)
_create_object("Task", task)
return task
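# Illustrative sketch (assumes the module-level _db_content store has already
# been initialized earlier in this file): a typical fake inventory is wired up
# from these helpers, e.g.
#
#     create_host()                    # first host, with a local datastore
#     create_host()                    # create_cluster() attaches two hosts
#     ds_ref = create_datastore('ds1', 1024, 500)
#     create_cluster('cl1', ds_ref)    # links hosts, datastore and a res pool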
def _add_file(file_path):
"""Adds a file reference to the db."""
_db_content["files"].append(file_path)
def _remove_file(file_path):
"""Removes a file reference from the db."""
# Check if the remove is for a single file object or for a folder
if file_path.find(".vmdk") != -1:
if file_path not in _db_content.get("files"):
raise vexc.FileNotFoundException(file_path)
_db_content.get("files").remove(file_path)
else:
# Removes the files in the folder and the folder too from the db
to_delete = set()
for file in _db_content.get("files"):
if file.find(file_path) != -1:
to_delete.add(file)
for file in to_delete:
_db_content.get("files").remove(file)
def fake_plug_vifs(*args, **kwargs):
"""Fakes plugging vifs."""
pass
def fake_get_network(*args, **kwargs):
"""Fake get network."""
return {'type': 'fake'}
def assertPathExists(test, path):
test.assertIn(path, _db_content.get('files'))
def assertPathNotExists(test, path):
test.assertNotIn(path, _db_content.get('files'))
def get_file(file_path):
"""Check if file exists in the db."""
return file_path in _db_content.get("files")
def fake_upload_image(context, image, instance, **kwargs):
"""Fakes the upload of an image."""
pass
def fake_fetch_image(context, instance, host, port, dc_name, ds_name,
file_path, cookies=None):
"""Fakes the fetch of an image."""
ds_file_path = "[" + ds_name + "] " + file_path
_add_file(ds_file_path)
def _get_vm_mdo(vm_ref):
"""Gets the Virtual Machine with the ref from the db."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("There is no VM registered")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
return _db_content.get("VirtualMachine")[vm_ref]
def _merge_extraconfig(existing, changes):
"""Imposes the changes in extraConfig over the existing extraConfig."""
existing = existing or []
if (changes):
for c in changes:
if len([x for x in existing if x.key == c.key]) > 0:
extraConf = [x for x in existing if x.key == c.key][0]
extraConf.value = c.value
else:
existing.append(c)
return existing
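def _example_merge_extraconfig():
    """Minimal sketch, using made-up option keys: _merge_extraconfig()
    overwrites an existing OptionValue entry in place and appends entries
    whose keys are not present yet.
    """
    existing = DataObject()
    existing.key, existing.value = 'nvp.vm-uuid', 'old'
    changed = DataObject()
    changed.key, changed.value = 'nvp.vm-uuid', 'new'
    added = DataObject()
    added.key, added.value = 'nvp.iface-id.0', 'port-1'
    merged = _merge_extraconfig([existing], [changed, added])
    # merged[0].value is now 'new' and the 'added' entry has been appended.
    return merged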
class FakeFactory(object):
"""Fake factory class for the suds client."""
def create(self, obj_name):
"""Creates a namespace object."""
return DataObject(obj_name)
class FakeService(DataObject):
"""Fake service class."""
def Logout(self, session_manager):
pass
def FindExtension(self, extension_manager, key):
return []
class FakeClient(DataObject):
"""Fake client class."""
def __init__(self):
"""Creates a namespace object."""
self.service = FakeService()
class FakeSession(object):
"""Fake Session Class."""
def __init__(self):
self.vim = FakeVim()
def _call_method(self, module, method, *args, **kwargs):
raise NotImplementedError()
def _wait_for_task(self, task_ref):
raise NotImplementedError()
class FakeObjectRetrievalSession(FakeSession):
"""A session for faking object retrieval tasks.
_call_method() returns a given set of objects
sequentially, regardless of the method called.
"""
def __init__(self, *ret):
super(FakeObjectRetrievalSession, self).__init__()
self.ret = ret
self.ind = 0
def _call_method(self, module, method, *args, **kwargs):
if (method == 'continue_retrieval' or
method == 'cancel_retrieval'):
return
# return fake objects in a circular manner
self.ind = (self.ind + 1) % len(self.ret)
return self.ret[self.ind - 1]
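def _example_object_retrieval_session():
    """Minimal sketch: the session above hands back the supplied objects in a
    round-robin fashion, regardless of the method name requested.
    """
    session = FakeObjectRetrievalSession('a', 'b')
    first = session._call_method(None, 'get_objects')    # -> 'a'
    second = session._call_method(None, 'get_objects')   # -> 'b'
    third = session._call_method(None, 'get_objects')    # -> 'a' again
    return first, second, third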
def get_fake_vim_object(vmware_api_session):
key = vmware_api_session.__repr__()
if key not in _vim_map:
_vim_map[key] = FakeVim()
return _vim_map[key]
class FakeVim(object):
"""Fake VIM Class."""
def __init__(self, protocol="https", host="localhost", trace=None):
"""Initializes the suds client object, sets the service content
contents and the cookies for the session.
"""
self._session = None
self.client = FakeClient()
self.client.factory = FakeFactory()
transport = DataObject()
transport.cookiejar = "Fake-CookieJar"
options = DataObject()
options.transport = transport
self.client.options = options
service_content = self.client.factory.create('ns0:ServiceContent')
service_content.propertyCollector = "PropCollector"
service_content.virtualDiskManager = "VirtualDiskManager"
service_content.fileManager = "FileManager"
service_content.rootFolder = "RootFolder"
service_content.sessionManager = "SessionManager"
service_content.extensionManager = "ExtensionManager"
service_content.searchIndex = "SearchIndex"
about_info = DataObject()
about_info.name = "VMware vCenter Server"
about_info.version = "5.1.0"
about_info.instanceUuid = _FAKE_VCENTER_UUID
service_content.about = about_info
self._service_content = service_content
@property
def service_content(self):
return self._service_content
def __repr__(self):
return "Fake VIM Object"
def __str__(self):
return "Fake VIM Object"
def _login(self):
"""Logs in and sets the session object in the db."""
self._session = uuidutils.generate_uuid()
session = DataObject()
session.key = self._session
session.userName = 'sessionUserName'
_db_content['session'][self._session] = session
return session
def _terminate_session(self, *args, **kwargs):
"""Terminates a session."""
s = kwargs.get("sessionId")[0]
if s not in _db_content['session']:
return
del _db_content['session'][s]
def _check_session(self):
"""Checks if the session is active."""
if (self._session is None or self._session not in
_db_content['session']):
LOG.debug("Session is faulty")
raise vexc.VimFaultException([vexc.NOT_AUTHENTICATED],
"Session Invalid")
def _session_is_active(self, *args, **kwargs):
try:
self._check_session()
return True
except Exception:
return False
def _create_vm(self, method, *args, **kwargs):
"""Creates and registers a VM object with the Host System."""
config_spec = kwargs.get("config")
if config_spec.guestId not in constants.VALID_OS_TYPES:
ex = vexc.VMwareDriverException('A specified parameter was '
'not correct.')
return create_task(method, "error", error_fault=ex).obj
pool = kwargs.get('pool')
version = getattr(config_spec, 'version', None)
devices = []
for device_change in config_spec.deviceChange:
if device_change.operation == 'add':
devices.append(device_change.device)
vm_ref = create_vm(config_spec.instanceUuid, config_spec.name,
config_spec.numCPUs, config_spec.memoryMB,
devices, config_spec.files.vmPathName,
config_spec.extraConfig, pool,
version=version)
task_mdo = create_task(method, "success", result=vm_ref)
return task_mdo.obj
def _reconfig_vm(self, method, *args, **kwargs):
"""Reconfigures a VM and sets the properties supplied."""
vm_ref = args[0]
vm_mdo = _get_vm_mdo(vm_ref)
vm_mdo.reconfig(self.client.factory, kwargs.get("spec"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _create_copy_disk(self, method, vmdk_file_path):
"""Creates/copies a vmdk file object in the datastore."""
# We need to add/create both .vmdk and .-flat.vmdk files
flat_vmdk_file_path = vmdk_file_path.replace(".vmdk", "-flat.vmdk")
_add_file(vmdk_file_path)
_add_file(flat_vmdk_file_path)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _extend_disk(self, method, size):
"""Extend disk size when create a instance."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _snapshot_vm(self, method):
"""Snapshots a VM. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _find_all_by_uuid(self, *args, **kwargs):
uuid = kwargs.get('uuid')
vm_refs = []
for vm_ref in _db_content.get("VirtualMachine"):
vm = _get_object(vm_ref)
vm_uuid = vm.get("summary.config.instanceUuid")
if vm_uuid == uuid:
vm_refs.append(vm_ref)
return vm_refs
def _delete_snapshot(self, method, *args, **kwargs):
"""Deletes a VM snapshot. Here we do nothing for faking sake."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _delete_file(self, method, *args, **kwargs):
"""Deletes a file from the datastore."""
_remove_file(kwargs.get("name"))
task_mdo = create_task(method, "success")
return task_mdo.obj
def _just_return(self):
"""Fakes a return."""
return
def _just_return_task(self, method):
"""Fakes a task return."""
task_mdo = create_task(method, "success")
return task_mdo.obj
def _clone_vm(self, method, *args, **kwargs):
"""Fakes a VM clone."""
"""Creates and registers a VM object with the Host System."""
source_vmref = args[0]
source_vm_mdo = _get_vm_mdo(source_vmref)
clone_spec = kwargs.get("spec")
vm_dict = {
"name": kwargs.get("name"),
"ds": source_vm_mdo.get("datastore"),
"runtime_host": source_vm_mdo.get("runtime.host"),
"powerstate": source_vm_mdo.get("runtime.powerState"),
"vmPathName": source_vm_mdo.get("config.files.vmPathName"),
"numCpu": source_vm_mdo.get("summary.config.numCpu"),
"mem": source_vm_mdo.get("summary.config.memorySizeMB"),
"extra_config": source_vm_mdo.get("config.extraConfig").OptionValue,
"virtual_device":
source_vm_mdo.get("config.hardware.device").VirtualDevice,
"instanceUuid": source_vm_mdo.get("summary.config.instanceUuid")}
if hasattr(clone_spec, 'config'):
# Impose the config changes specified in the config property
if (hasattr(clone_spec.config, 'instanceUuid') and
clone_spec.config.instanceUuid is not None):
vm_dict["instanceUuid"] = clone_spec.config.instanceUuid
if hasattr(clone_spec.config, 'extraConfig'):
extraConfigs = _merge_extraconfig(vm_dict["extra_config"],
clone_spec.config.extraConfig)
vm_dict["extra_config"] = extraConfigs
virtual_machine = VirtualMachine(**vm_dict)
_create_object("VirtualMachine", virtual_machine)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _unregister_vm(self, method, *args, **kwargs):
"""Unregisters a VM from the Host System."""
vm_ref = args[0]
_get_vm_mdo(vm_ref)
del _db_content["VirtualMachine"][vm_ref]
task_mdo = create_task(method, "success")
return task_mdo.obj
def _search_ds(self, method, *args, **kwargs):
"""Searches the datastore for a file."""
# TODO(garyk): add support for spec parameter
ds_path = kwargs.get("datastorePath")
matched_files = set()
# Check if we are searching for a file or a directory
directory = False
dname = '%s/' % ds_path
for file in _db_content.get("files"):
if file == dname:
directory = True
break
# A directory search implies that we must return all
# subdirectories
if directory:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
if not file.endswith(ds_path):
path = file.replace(dname, '', 1).split('/')
if path:
matched_files.add(path[0])
if not matched_files:
matched_files.add('/')
else:
for file in _db_content.get("files"):
if file.find(ds_path) != -1:
matched_files.add(ds_path)
if matched_files:
result = DataObject()
result.path = ds_path
result.file = []
for file in matched_files:
matched = DataObject()
matched.path = file
result.file.append(matched)
task_mdo = create_task(method, "success", result=result)
else:
task_mdo = create_task(method, "error", error_fault=FileNotFound())
return task_mdo.obj
def _move_file(self, method, *args, **kwargs):
source = kwargs.get('sourceName')
destination = kwargs.get('destinationName')
new_files = []
if source != destination:
for file in _db_content.get("files"):
if source in file:
new_file = file.replace(source, destination)
new_files.append(new_file)
# if source is not a file then the children will also
# be deleted
_remove_file(source)
for file in new_files:
_add_file(file)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _make_dir(self, method, *args, **kwargs):
"""Creates a directory in the datastore."""
ds_path = kwargs.get("name")
if get_file(ds_path):
raise vexc.FileAlreadyExistsException()
_db_content["files"].append('%s/' % ds_path)
def _set_power_state(self, method, vm_ref, pwr_state="poweredOn"):
"""Sets power state for the VM."""
if _db_content.get("VirtualMachine", None) is None:
raise exception.NotFound("No Virtual Machine has been "
"registered yet")
if vm_ref not in _db_content.get("VirtualMachine"):
raise exception.NotFound("Virtual Machine with ref %s is not "
"there" % vm_ref)
vm_mdo = _db_content.get("VirtualMachine").get(vm_ref)
vm_mdo.set("runtime.powerState", pwr_state)
task_mdo = create_task(method, "success")
return task_mdo.obj
def _retrieve_properties_continue(self, method, *args, **kwargs):
"""Continues the retrieve."""
return FakeRetrieveResult()
def _retrieve_properties_cancel(self, method, *args, **kwargs):
"""Cancels the retrieve."""
return None
def _retrieve_properties(self, method, *args, **kwargs):
"""Retrieves properties based on the type."""
spec_set = kwargs.get("specSet")[0]
spec_type = spec_set.propSet[0].type
properties = spec_set.propSet[0].pathSet
if not isinstance(properties, list):
properties = properties.split()
objs = spec_set.objectSet
lst_ret_objs = FakeRetrieveResult()
for obj in objs:
try:
obj_ref = obj.obj
if obj_ref == "RootFolder":
# This means that we are retrieving props for all managed
# data objects of the specified 'type' in the entire
# inventory. This gets invoked by vim_util.get_objects.
mdo_refs = _db_content[spec_type]
elif obj_ref.type != spec_type:
# This means that we are retrieving props for the managed
# data objects in the parent object's 'path' property.
# This gets invoked by vim_util.get_inner_objects
# eg. obj_ref = <ManagedObjectReference of a cluster>
# type = 'DataStore'
# path = 'datastore'
# the above will retrieve all datastores in the given
# cluster.
parent_mdo = _db_content[obj_ref.type][obj_ref]
path = obj.selectSet[0].path
mdo_refs = parent_mdo.get(path).ManagedObjectReference
else:
# This means that we are retrieving props of the given
# managed data object. This gets invoked by
# vim_util.get_properties_for_a_collection_of_objects.
mdo_refs = [obj_ref]
for mdo_ref in mdo_refs:
mdo = _db_content[spec_type][mdo_ref]
prop_list = []
for prop_name in properties:
prop = Prop(prop_name, mdo.get(prop_name))
prop_list.append(prop)
obj_content = ObjectContent(mdo.obj, prop_list)
lst_ret_objs.add_object(obj_content)
except Exception:
LOG.exception("_retrieve_properties error")
continue
return lst_ret_objs
def _add_port_group(self, method, *args, **kwargs):
"""Adds a port group to the host system."""
_host_sk = _db_content["HostSystem"].keys()[0]
host_mdo = _db_content["HostSystem"][_host_sk]
host_mdo._add_port_group(kwargs.get("portgrp"))
def _add_iscsi_send_tgt(self, method, *args, **kwargs):
"""Adds a iscsi send target to the hba."""
send_targets = kwargs.get('targets')
host_storage_sys = _get_objects('HostStorageSystem').objects[0]
iscsi_hba_array = host_storage_sys.get('storageDeviceInfo'
'.hostBusAdapter')
iscsi_hba = iscsi_hba_array.HostHostBusAdapter[0]
if hasattr(iscsi_hba, 'configuredSendTarget'):
iscsi_hba.configuredSendTarget.extend(send_targets)
else:
iscsi_hba.configuredSendTarget = send_targets
def __getattr__(self, attr_name):
if attr_name != "Login":
self._check_session()
if attr_name == "Login":
return lambda *args, **kwargs: self._login()
elif attr_name == "SessionIsActive":
return lambda *args, **kwargs: self._session_is_active(
*args, **kwargs)
elif attr_name == "TerminateSession":
return lambda *args, **kwargs: self._terminate_session(
*args, **kwargs)
elif attr_name == "CreateVM_Task":
return lambda *args, **kwargs: self._create_vm(attr_name,
*args, **kwargs)
elif attr_name == "ReconfigVM_Task":
return lambda *args, **kwargs: self._reconfig_vm(attr_name,
*args, **kwargs)
elif attr_name == "CreateVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("name"))
elif attr_name == "DeleteDatastoreFile_Task":
return lambda *args, **kwargs: self._delete_file(attr_name,
*args, **kwargs)
elif attr_name == "PowerOnVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "PowerOffVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOff")
elif attr_name == "RebootGuest":
return lambda *args, **kwargs: self._just_return()
elif attr_name == "ResetVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "poweredOn")
elif attr_name == "SuspendVM_Task":
return lambda *args, **kwargs: self._set_power_state(attr_name,
args[0], "suspended")
elif attr_name == "CreateSnapshot_Task":
return lambda *args, **kwargs: self._snapshot_vm(attr_name)
elif attr_name == "RemoveSnapshot_Task":
return lambda *args, **kwargs: self._delete_snapshot(attr_name,
*args, **kwargs)
elif attr_name == "CopyVirtualDisk_Task":
return lambda *args, **kwargs: self._create_copy_disk(attr_name,
kwargs.get("destName"))
elif attr_name == "ExtendVirtualDisk_Task":
return lambda *args, **kwargs: self._extend_disk(attr_name,
kwargs.get("size"))
elif attr_name == "Destroy_Task":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "UnregisterVM":
return lambda *args, **kwargs: self._unregister_vm(attr_name,
*args, **kwargs)
elif attr_name == "CloneVM_Task":
return lambda *args, **kwargs: self._clone_vm(attr_name,
*args, **kwargs)
elif attr_name == "FindAllByUuid":
return lambda *args, **kwargs: self._find_all_by_uuid(attr_name,
*args, **kwargs)
elif attr_name == "SearchDatastore_Task":
return lambda *args, **kwargs: self._search_ds(attr_name,
*args, **kwargs)
elif attr_name == "MoveDatastoreFile_Task":
return lambda *args, **kwargs: self._move_file(attr_name,
*args, **kwargs)
elif attr_name == "MakeDirectory":
return lambda *args, **kwargs: self._make_dir(attr_name,
*args, **kwargs)
elif attr_name == "RetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties(
attr_name, *args, **kwargs)
elif attr_name == "ContinueRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_continue(
attr_name, *args, **kwargs)
elif attr_name == "CancelRetrievePropertiesEx":
return lambda *args, **kwargs: self._retrieve_properties_cancel(
attr_name, *args, **kwargs)
elif attr_name == "AddPortGroup":
return lambda *args, **kwargs: self._add_port_group(attr_name,
*args, **kwargs)
elif attr_name in ("RebootHost_Task",
"ShutdownHost_Task",
"PowerUpHostFromStandBy_Task",
"EnterMaintenanceMode_Task",
"ExitMaintenanceMode_Task",
"RescanHba"):
return lambda *args, **kwargs: self._just_return_task(attr_name)
elif attr_name == "AddInternetScsiSendTargets":
return lambda *args, **kwargs: self._add_iscsi_send_tgt(attr_name,
*args, **kwargs)
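# Illustrative sketch (assumes the module-level _db_content store has been
# populated, e.g. with the create_* helpers above): tests drive the fake
# through VIM-style attribute access, which __getattr__ maps onto the
# underscore-prefixed handlers:
#
#     vim = FakeVim()
#     session = vim.Login()                  # opens a fake session
#     task_ref = vim.PowerOnVM_Task(vm_ref)  # vm_ref obtained from create_vm()
#     vim.UnregisterVM(vm_ref)               # removes the VM from _db_content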
|
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.cloud.vision.v1p3beta1 ImageAnnotator API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.grpc_helpers
import google.api_core.operation
from google.api_core import operations_v1
import grpc
from google.cloud.vision_v1p3beta1.gapic import enums
from google.cloud.vision_v1p3beta1.gapic import image_annotator_client_config
from google.cloud.vision_v1p3beta1.gapic.transports import (
image_annotator_grpc_transport,
)
from google.cloud.vision_v1p3beta1.proto import image_annotator_pb2
from google.cloud.vision_v1p3beta1.proto import image_annotator_pb2_grpc
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2
from google.cloud.vision_v1p3beta1.proto import product_search_service_pb2_grpc
from google.longrunning import operations_pb2
from google.protobuf import empty_pb2
from google.protobuf import field_mask_pb2
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution("google-cloud-vision").version
class ImageAnnotatorClient(object):
"""
Service that performs Google Cloud Vision API detection tasks over client
images, such as face, landmark, logo, label, and text detection. The
ImageAnnotator service returns detected entities from the images.
"""
SERVICE_ADDRESS = "vision.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.cloud.vision.v1p3beta1.ImageAnnotator"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ImageAnnotatorClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.ImageAnnotatorGrpcTransport,
Callable[[~.Credentials, type], ~.ImageAnnotatorGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = image_annotator_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=image_annotator_grpc_transport.ImageAnnotatorGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = image_annotator_grpc_transport.ImageAnnotatorGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
# Service calls
def batch_annotate_images(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Run image detection and annotation for a batch of images.
Example:
>>> from google.cloud import vision_v1p3beta1
>>>
>>> client = vision_v1p3beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.batch_annotate_images(requests)
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p3beta1.types.AnnotateImageRequest]]): Individual image annotation requests for this batch.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p3beta1.types.AnnotateImageRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p3beta1.types.BatchAnnotateImagesResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "batch_annotate_images" not in self._inner_api_calls:
self._inner_api_calls[
"batch_annotate_images"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.batch_annotate_images,
default_retry=self._method_configs["BatchAnnotateImages"].retry,
default_timeout=self._method_configs["BatchAnnotateImages"].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.BatchAnnotateImagesRequest(requests=requests)
return self._inner_api_calls["batch_annotate_images"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def async_batch_annotate_files(
self,
requests,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Run asynchronous image detection and annotation for a list of generic
files, such as PDF files, which may contain multiple pages and multiple
images per page. Progress and results can be retrieved through the
``google.longrunning.Operations`` interface. ``Operation.metadata``
contains ``OperationMetadata`` (metadata). ``Operation.response``
contains ``AsyncBatchAnnotateFilesResponse`` (results).
Example:
>>> from google.cloud import vision_v1p3beta1
>>>
>>> client = vision_v1p3beta1.ImageAnnotatorClient()
>>>
>>> # TODO: Initialize `requests`:
>>> requests = []
>>>
>>> response = client.async_batch_annotate_files(requests)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
requests (list[Union[dict, ~google.cloud.vision_v1p3beta1.types.AsyncAnnotateFileRequest]]): Individual async file annotation requests for this batch.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.vision_v1p3beta1.types.AsyncAnnotateFileRequest`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.vision_v1p3beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "async_batch_annotate_files" not in self._inner_api_calls:
self._inner_api_calls[
"async_batch_annotate_files"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.async_batch_annotate_files,
default_retry=self._method_configs["AsyncBatchAnnotateFiles"].retry,
default_timeout=self._method_configs["AsyncBatchAnnotateFiles"].timeout,
client_info=self._client_info,
)
request = image_annotator_pb2.AsyncBatchAnnotateFilesRequest(requests=requests)
operation = self._inner_api_calls["async_batch_annotate_files"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
image_annotator_pb2.AsyncBatchAnnotateFilesResponse,
metadata_type=image_annotator_pb2.OperationMetadata,
)
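def _example_batch_annotate_images():
    """Minimal usage sketch, assuming application-default credentials are
    available and the (hypothetical) image URI is readable; requests may be
    given as plain dicts mirroring AnnotateImageRequest.
    """
    client = ImageAnnotatorClient()
    return client.batch_annotate_images(
        [
            {
                "image": {"source": {"image_uri": "gs://my-bucket/image.jpg"}},
                "features": [{"type": enums.Feature.Type.LABEL_DETECTION}],
            }
        ]
    )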
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright INRIA
# Contributors: Nicolas P. Rougier ([email protected])
#
# DANA is a computing framework for the simulation of distributed,
# asynchronous, numerical and adaptive models.
#
# This software is governed by the CeCILL license under French law and abiding
# by the rules of distribution of free software. You can use, modify and/ or
# redistribute the software under the terms of the CeCILL license as circulated
# by CEA, CNRS and INRIA at the following URL
# http://www.cecill.info/index.en.html.
#
# As a counterpart to the access to the source code and rights to copy, modify
# and redistribute granted by the license, users are provided only with a
# limited warranty and the software's author, the holder of the economic
# rights, and the successive licensors have only limited liability.
#
# In this respect, the user's attention is drawn to the risks associated with
# loading, using, modifying and/or developing or reproducing the software by
# the user in light of its specific status of free software, that may mean that
# it is complicated to manipulate, and that also therefore means that it is
# reserved for developers and experienced professionals having in-depth
# computer knowledge. Users are therefore encouraged to load and test the
# software's suitability as regards their requirements in conditions enabling
# the security of their systems and/or data to be ensured and, more generally,
# to use and operate it in the same conditions as regards security.
#
# The fact that you are presently reading this means that you have had
# knowledge of the CeCILL license and that you accept its terms.
# -----------------------------------------------------------------------------
import unittest
from numpy import *
import scipy.sparse as sp
from tools import np_equal
from dana import SparseConnection as Connection
class SparseOneDimensionTestCase(unittest.TestCase):
def test_1(self):
assert np_equal( Connection(ones(3), ones(3), ones(1)).output(),
ones(3))
def test_2(self):
assert np_equal( Connection(ones(3), ones(5), ones(1)).output(),
ones(5))
def test_3(self):
assert np_equal( Connection(ones(5), ones(3), ones(1)).output(),
ones(3))
def test_4(self):
assert np_equal( Connection(ones(3), ones(3), ones(3)).output(),
array([2,3,2]))
def test_5(self):
assert np_equal( Connection(ones(3), ones(5), ones(3)).output(),
array([2,2,3,2,2]))
def test_6(self):
assert np_equal( Connection(ones(5), ones(3), ones(3)).output(),
array([2,3,2]))
def test_7(self):
assert np_equal( Connection(ones(3), ones(3), array([1,NaN,1])).output(),
array([1,2,1]))
def test_8(self):
assert np_equal( Connection(ones(3), ones(3), array([NaN,NaN,NaN])).output(),
zeros(3))
def test_9(self):
assert np_equal( Connection(ones(3), ones(3), ones((3,3))).output(),
3*ones(3))
def test_10(self):
C = Connection(ones(3), ones(3), ones(1))
assert np_equal(C[0], array([1, NaN, NaN]))
assert np_equal(C[1], array([NaN, 1, NaN]))
assert np_equal(C[2], array([NaN, NaN, 1]))
def test_11(self):
assert np_equal(Connection(ones(3), ones(3), ones(3), toric=True).output(),
ones(3)*3)
def test_11_bis(self):
assert np_equal(Connection(ones(3), ones(3), ones(20), toric=True).output(),
ones(3)*3)
def test_12(self):
Z = ones(5)
K = arange(5)
C = Connection(Z,Z,K)
assert np_equal(C[0], array([2,3,4,NaN,NaN]))
def test_13(self):
Z = ones(5)
K = arange(5)
C = Connection(Z,Z,K)
assert np_equal(C[2],K)
def test_14(self):
Z = ones(5)
K = arange(5)
C = Connection(Z,Z,K)
assert np_equal(C[4], array([NaN,NaN,0,1,2]))
class SparseTwoDimensionTestCase(unittest.TestCase):
def test_1(self):
assert np_equal( Connection(ones((3,3)), ones((3,3)), ones((1,1))).output(),
ones((3,3)))
def test_2(self):
assert np_equal( Connection(ones((3,3)), ones((5,5)), ones((1,1))).output(),
ones((5,5)))
def test_3(self):
assert np_equal( Connection(ones((5,5)), ones((3,3)), ones((1,1))).output(),
ones((3,3)))
def test_4(self):
assert np_equal( Connection(ones((3,3)), ones((3,3)), ones((3,3))).output(),
array([[4,6,4],
[6,9,6],
[4,6,4]]))
def test_5(self):
assert np_equal( Connection(ones((3,3)), ones((5,5)), ones((3,3))).output(),
array([[4,4,6,4,4],
[4,4,6,4,4],
[6,6,9,6,6],
[4,4,6,4,4],
[4,4,6,4,4]]))
def test_6(self):
assert np_equal( Connection(ones((5,5)), ones((3,3)), ones((3,3))).output(),
array([[4,6,4],
[6,9,6],
[4,6,4]]))
def test_7(self):
assert np_equal( Connection(ones((3,3)), ones((3,3)), array([[1, 1, 1],
[1,NaN,1],
[1, 1, 1]])).output(),
array([[3,5,3],
[5,8,5],
[3,5,3]]))
def test_8(self):
assert np_equal( Connection(ones((3,3)), ones((3,3)), ones((3,3))*NaN).output(),
zeros((3,3)) )
def test_9(self):
assert np_equal( Connection(ones((3,3)), ones((3,3)), ones((9,9))).output(),
9*ones((3,3)))
def test_10(self):
C = Connection(ones((3,3)), ones((3,3)), ones((1,1)))
assert np_equal(C[1,1], array([[NaN, NaN, NaN],
[NaN, 1, NaN],
[NaN, NaN, NaN]]))
def test_11(self):
assert np_equal(Connection(ones((3,3)), ones((3,3)), ones((3,3)), toric=True).output(),
ones((3,3))*9)
def test_11_bis(self):
assert np_equal(Connection(ones((3,3)), ones((3,3)), ones((20,20)), toric=True).output(),
ones((3,3))*9)
def test_11_ter(self):
assert np_equal(Connection(ones((3,3)), ones((3,3)), ones((1,20)), toric=True).output(),
ones((3,3))*3)
def test_12(self):
Z = ones((5,5))
K = arange(5*5).reshape((5,5))
C = Connection(Z,Z,K)
assert np_equal(C[0,0],
array([[12, 13, 14,NaN,NaN],
[17, 18, 19,NaN,NaN],
[22, 23, 24,NaN,NaN],
[NaN,NaN,NaN,NaN,NaN],
[NaN,NaN,NaN,NaN,NaN]]))
def test_13(self):
Z = ones((5,5))
K = arange(5*5).reshape((5,5))
C = Connection(Z,Z,K)
assert np_equal(C[0,4],
array([[NaN,NaN,10, 11, 12],
[NaN,NaN,15, 16, 17],
[NaN,NaN,20, 21, 22],
[NaN,NaN,NaN,NaN,NaN],
[NaN,NaN,NaN,NaN,NaN]]))
def test_14(self):
Z = ones((5,5))
K = arange(5*5).reshape((5,5))
C = Connection(Z,Z,K)
assert np_equal(C[4,0],
array([[NaN,NaN,NaN,NaN,NaN],
[NaN,NaN,NaN,NaN,NaN],
[ 2, 3, 4,NaN,NaN],
[ 7, 8, 9,NaN,NaN],
[ 12, 13, 14,NaN,NaN]]))
def test_15(self):
Z = ones((5,5))
K = arange(5*5).reshape((5,5))
C = Connection(Z,Z,K)
assert np_equal(C[4,4],
array([[NaN,NaN,NaN,NaN,NaN],
[NaN,NaN,NaN,NaN,NaN],
[NaN,NaN, 0, 1, 2],
[NaN,NaN, 5, 6, 7],
[NaN,NaN, 10, 11, 12]]))
def test_16(self):
Z = ones((5,5))
K = arange(5*5).reshape((5,5))
C = Connection(Z,Z,K)
assert np_equal(C[2,2],K)
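def _convolution_sketch():
    """Minimal sketch: the expectations above follow ordinary convolution with
    the kernel clipped at the borders, e.g. test_4 of the 1-D case.
    """
    # A field of ones convolved with a 3-point kernel of ones, truncated at
    # the edges, gives [2, 3, 2].
    return convolve(ones(3), ones(3), mode='same')  # -> array([ 2., 3., 2.])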
if __name__ == "__main__":
unittest.main()
|
|
from __future__ import print_function, division
import sys
from copy import copy
import numpy as np
from numpy import require, zeros_like
from scipy.linalg import blas
from timeit import default_timer as timer
from pyscf.data.nist import HARTREE2EV
from pyscf.nao.chi0_matvec import chi0_matvec
from pyscf.nao.m_blas_wrapper import spmv_wrapper
from pyscf.nao.m_pack2den import pack2den_u
class tddft_iter(chi0_matvec):
"""
Iterative TDDFT a la PK, DF, OC JCTC
Input Parameters:
-----------------
kw: keywords arguments:
* tddft_iter_tol (real, default: 1e-3): tolerance to reach for
convergency in the iterative procedure.
* tmp_fname (string, default None): temporary file to save polarizability
at each frequency. Can be a life saver for large systems.
"""
def __init__(self, **kw):
self.load_kernel = load_kernel = kw['load_kernel'] if 'load_kernel' in kw else False
self.maxiter = kw['maxiter'] if 'maxiter' in kw else 1000
self.tddft_iter_tol = kw['tddft_iter_tol'] if 'tddft_iter_tol' in kw else 1e-3
self.res_method = kw["res_method"] if "res_method" in kw else "both"
assert self.tddft_iter_tol>1e-6
# better to check input before to initialize calculations
chi0_matvec.__init__(self, **kw)
if self.scipy_ver < 1 and self.res_method != "both":
import warnings
      warnings.warn("scipy.__version__ < 1: res_method 'both' will be used")
self.xc_code_mf = copy(self.xc_code)
self.xc_code = xc_code = kw['xc_code'] if 'xc_code' in kw else self.xc_code
self.matvec_ncalls = 0
if not hasattr(self, 'pb'):
print(__name__, 'no pb?')
print(__name__, kw.keys())
return
self.spmv = spmv_wrapper
if self.scipy_ver > 0:
if self.dtype == np.float32: self.spmv = blas.sspmv
elif self.dtype == np.float64: self.spmv = blas.dspmv
else: raise ValueError("dtype can be only float32 or float64")
xc = xc_code.split(',')[0]
if load_kernel:
self.load_kernel_method(**kw)
if self.nspin==1:
self.ss2kernel = [[self.kernel]]
elif self.nspin==2:
self.ss2kernel = [[self.kernel,self.kernel], [self.kernel,self.kernel]]
if xc!='RPA' and self.nspin!=1: raise RuntimeError('not sure it would work')
else:
self.kernel,self.kernel_dim = self.pb.comp_coulomb_pack(dtype=self.dtype) # Lower Triangular
assert self.nprod==self.kernel_dim,"{} {}".format(self.nprod,self.kernel_dim)
if self.nspin==1:
self.ss2kernel = [[self.kernel]]
elif self.nspin==2:
self.ss2kernel = [[self.kernel,self.kernel], [self.kernel,self.kernel]]
      # Careful: these entries are references to the same kernel array (not copies), ordered [[(up,up), (up,dw)], [(dw,up), (dw,dw)]].
if xc=='RPA' or xc=='HF':
pass
elif xc=='LDA' or xc=='GGA':
if self.nspin==1:
self.comp_fxc_pack(kernel=self.kernel, **kw)
elif self.nspin==2:
kkk = self.comp_fxc_pack(**kw) + self.kernel
self.ss2kernel = [[kkk[0], kkk[1]], [kkk[1],kkk[2]]]
for s in range(self.nspin):
for t in range(self.nspin): assert self.ss2kernel[s][t].dtype==self.dtype
else:
print(' xc_code', xc_code, xc, xc_code.split(','))
      raise RuntimeError('unknown xc_code')
if self.verbosity>0 : print(__name__,'\t====> self.xc_code:', self.xc_code)
def load_kernel_method(self, kernel_fname, kernel_format="npy", kernel_path_hdf5=None, **kw):
""" Loads from file and initializes .kernel field... Useful? Rewrite?"""
if kernel_format == "npy":
self.kernel = self.dtype(np.load(kernel_fname))
elif kernel_format == "txt":
self.kernel = np.loadtxt(kernel_fname, dtype=self.dtype)
elif kernel_format == "hdf5":
import h5py
if kernel_path_hdf5 is None:
raise ValueError("kernel_path_hdf5 not set while trying to read kernel from hdf5 file.")
self.kernel = h5py.File(kernel_fname, "r")[kernel_path_hdf5].value
else:
raise ValueError("Wrong format for loading kernel, must be: npy, txt or hdf5, got " + kernel_format)
if len(self.kernel.shape) > 1:
raise ValueError("The kernel must be saved in packed format in order to be loaded!")
assert self.nprod*(self.nprod+1)//2 == self.kernel.size, "wrong size for loaded kernel: %r %r "%(self.nprod*(self.nprod+1)//2, self.kernel.size)
self.kernel_dim = self.nprod
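  # Packed-storage note: a symmetric nprod x nprod kernel is kept as a 1-D
  # array of its nprod*(nprod+1)//2 unique elements, which is exactly what the
  # size assertion above checks (e.g. nprod = 100 gives 5050 stored numbers).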
def comp_fxc_lil(self, **kw):
"""Computes the sparse version of the TDDFT interaction kernel"""
from pyscf.nao.m_vxc_lil import vxc_lil
return vxc_lil(self, deriv=2, ao_log=self.pb.prod_log, **kw)
def comp_fxc_pack(self, **kw):
"""Computes the packed version of the TDDFT interaction kernel """
from pyscf.nao.m_vxc_pack import vxc_pack
return vxc_pack(self, deriv=2, ao_log=self.pb.prod_log, **kw)
def comp_veff(self, vext, comega=1j*0.0, x0=None):
""" This computes an effective field (scalar potential) given the external scalar potential """
from scipy.sparse.linalg import LinearOperator, lgmres
nsp = self.nspin*self.nprod
assert len(vext)==nsp, "{} {}".format(len(vext), nsp)
self.comega_current = comega
veff_op = LinearOperator((nsp,nsp), matvec=self.vext2veff_matvec, dtype=self.dtypeComplex)
if self.res_method == "absolute":
tol = 0.0
atol = self.tddft_iter_tol
elif self.res_method == "relative":
tol = self.tddft_iter_tol
atol = 0.0
elif self.res_method == "both":
tol = self.tddft_iter_tol
atol = self.tddft_iter_tol
else:
raise ValueError("Unknow res_method")
resgm, info = lgmres(veff_op, np.require(vext, dtype=self.dtypeComplex,
requirements='C'),
x0=x0, tol=tol, atol=atol, maxiter=self.maxiter)
if info != 0: print("LGMRES Warning: info = {0}".format(info))
return resgm
def vext2veff_matvec(self, vin):
self.matvec_ncalls+=1
dn0 = self.apply_rf0(vin, self.comega_current)
vcre,vcim = self.apply_kernel(dn0)
return vin - (vcre + 1.0j*vcim)
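  # Note: vext2veff_matvec applies (1 - K*chi0) to vin, where chi0 is the
  # non-interacting response (apply_rf0) and K the Hartree(+xc) kernel, so the
  # lgmres call in comp_veff solves the screening equation
  #     (1 - K*chi0) v_eff = v_ext,   i.e.   v_eff = v_ext + K*chi0*v_eff.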
def vext2veff_matvec2(self, vin):
self.matvec_ncalls+=1
dn0 = self.apply_rf0(vin, self.comega_current)
vcre,vcim = self.apply_kernel(dn0)
return 1- (vin - (vcre + 1.0j*vcim))
def apply_kernel(self, dn):
if self.nspin==1:
return self.apply_kernel_nspin1(dn)
elif self.nspin==2:
return self.apply_kernel_nspin2(dn)
def apply_kernel_nspin1(self, dn):
daux = np.zeros(self.nprod, dtype=self.dtype)
daux[:] = require(dn.real, dtype=self.dtype, requirements=["A","O"])
vcre = self.spmv(self.nprod, 1.0, self.kernel, daux)
daux[:] = require(dn.imag, dtype=self.dtype, requirements=["A","O"])
vcim = self.spmv(self.nprod, 1.0, self.kernel, daux)
return vcre,vcim
def apply_kernel_nspin2(self, dn):
vcre = np.zeros((2,self.nspin,self.nprod), dtype=self.dtype)
daux = np.zeros((self.nprod), dtype=self.dtype)
s2dn = dn.reshape((self.nspin,self.nprod))
for s in range(self.nspin):
for t in range(self.nspin):
for ireim,sreim in enumerate(('real', 'imag')):
daux[:] = require(getattr(s2dn[t], sreim), dtype=self.dtype, requirements=["A","O"])
vcre[ireim,s] += self.spmv(self.nprod, 1.0, self.ss2kernel[s][t], daux)
return vcre[0].reshape(-1),vcre[1].reshape(-1)
def comp_polariz_inter_xx(self, comegas, tmp_fname=None):
""" Compute the interacting polarizability along the xx direction """
pxx = np.zeros(comegas.shape, dtype=self.dtypeComplex)
if tmp_fname is not None:
assert isinstance(tmp_fname, str), "tmp_fname must be a string"
vext = np.transpose(self.moms1)
    nww = len(comegas)
for iw, comega in enumerate(comegas):
if self.verbosity>0: print(iw, nww, comega.real*HARTREE2EV)
veff = self.comp_veff(vext[0], comega)
dn = self.apply_rf0(veff, comega)
pxx[iw] = np.dot(vext[0], dn)
if tmp_fname is not None:
tmp = open(tmp_fname, "a")
tmp.write("{0} {1} {2}\n".format(comega.real, pxx[iw].real,
pxx[iw].imag))
tmp.close() # Need to open and close the file at every freq, otherwise
                         # the data would be written only at the end of the calculation,
                         # defeating the purpose of a progress file
return pxx
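  # Each frequency point above evaluates P_xx(w) = sum_mu d_x,mu * dn_mu(w), with
  # d_x = moms1[:, 0] and dn = chi0(w) * veff(w), i.e. the xx component of the
  # interacting polarizability.  Hypothetical usage sketch (assumes `td` is an
  # instance of this class; names and values are illustrative):
  #
  #   import numpy as np
  #   omegas = np.linspace(0.0, 0.5, 100) + 1j * 0.01   # frequencies in Hartree, with broadening
  #   pxx = td.comp_polariz_inter_xx(omegas)
  #   # the absorption spectrum is usually taken from pxx.imag
  #   # (up to sign and prefactor conventions)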
def comp_polariz_inter_ave(self, comegas, tmp_fname=None, **kw):
""" Compute average interacting polarizability """
verbosity = kw['verbosity'] if 'verbosity' in kw else self.verbosity
sh = comegas.shape if hasattr(comegas, 'shape') else (len(comegas))
p_avg = np.zeros(sh, dtype=self.dtypeComplex)
if tmp_fname is not None:
assert isinstance(tmp_fname, str), "tmp_fname must be a string"
nww = len(comegas)
for iw, comega in enumerate(comegas):
for xyz in range(3):
vext = np.concatenate([self.moms1[:,xyz] for s in range(self.nspin)])
if verbosity>0: print(__name__, xyz, iw, nww, comega*HARTREE2EV)
veff = self.comp_veff(vext, comega)
dn = self.apply_rf0(veff, comega)
p_avg[iw] += np.dot(vext, dn)
if tmp_fname is not None:
tmp = open(tmp_fname, "a")
tmp.write("{0} {1} {2}\n".format(comega.real, p_avg[iw].real/3.0,
p_avg[iw].imag/3.0))
tmp.close() # Need to open and close the file at every freq, otherwise
                    # the data would be written only at the end of the calculation,
                    # defeating the purpose of a progress file
return p_avg/3.0
polariz_inter_ave = comp_polariz_inter_ave
def comp_dens_inter_along_Eext(self, comegas, Eext=np.array([1.0,0.0,0.0]),tmp_fname=None):
"""
    Compute the average interacting polarizability along the Eext direction
for the frequencies comegas.
Input Parameters:
comegas (1D array, complex): the real part contains the frequencies at which the polarizability
            should be computed. The imaginary part is the width of the polarizability, defined as self.eps.
Eext (1D xyz array, real): direction of the external field
        maxiter (integer): maximum number of iterations before exiting the GMRES iteration loop
    Other calculated quantities:
self.p_mat (complex array, dim: [3, 3, comega.size]): store the (3, 3) polarizability matrix
[[Pxx, Pxy, Pxz],
[Pyx, Pyy, Pyz],
[Pzx, Pzy, Pzz]] for each frequency.
self.dn (complex array, dim: [3, comegas.size, self.nprod]): store the density change
"""
if tmp_fname is not None:
if not isinstance(tmp_fname, str):
raise ValueError("tmp_fname must be a string")
else:
tmp_re = open(tmp_fname+".real", "a")
tmp_re.write("# All atomic units\n")
tmp_re.write("# w (Ha) Pxx Pxy Pxz Pyx Pyy Pyz Pzx Pzy Pzz\n")
tmp_im = open(tmp_fname+".imag", "a")
tmp_im.write("# All atomic units\n")
tmp_im.write("# w Pxx Pxy Pxz Pyx Pyy Pyz Pzx Pzy Pzz\n")
assert Eext.size == 3
self.p_mat = np.zeros((3, 3, comegas.size), dtype=self.dtypeComplex)
self.dn = np.zeros((3, comegas.size, self.nprod), dtype=self.dtypeComplex)
Edir = Eext/np.dot(Eext, Eext)
vext = np.transpose(self.moms1)
nww, eV = len(comegas), 27.211386024367243
if tmp_fname is not None:
for iw,comega in enumerate(comegas):
for xyz, Exyz in enumerate(Edir):
if Exyz == 0.0: continue
if self.verbosity>0:
print("dir: {0}, w: {1}/{2}: ".format(xyz, iw, nww), comega*eV)
veff = self.comp_veff(vext[xyz], comega)
self.dn[xyz, iw, :] = self.apply_rf0(veff, comega)
for xyzp, Exyzp in enumerate(Edir):
self.p_mat[xyz, xyzp, iw] = np.dot(vext[xyzp], self.dn[xyz, iw, :])
tmp_re = open(tmp_fname+".real", "a")
tmp_re.write("{0} ".format(comega.real))
tmp_im = open(tmp_fname+".imag", "a")
tmp_im.write("{0} ".format(comega.real))
for i in range(3):
for j in range(3):
tmp_re.write("{0} ".format(self.p_mat[i, j, iw].real))
tmp_im.write("{0} ".format(self.p_mat[i, j, iw].imag))
tmp_re.write("\n")
tmp_im.write("\n")
tmp_re.close() # Need to open and close the file at every freq, otherwise
                               # the data would be written only at the end of the calculation,
                               # defeating the purpose of a progress file
tmp_im.close()
else:
for xyz, Exyz in enumerate(Edir):
if Exyz == 0.0: continue
for iw,comega in enumerate(comegas):
print(xyz, iw)
if self.verbosity>0:
print("dir: {0}/3, w: {1}/{2}: ".format(xyz, iw, nww), comega*eV)
veff = self.comp_veff(vext[xyz], comega)
self.dn[xyz, iw, :] = self.apply_rf0(veff, comega)
self.p_mat = np.einsum("jp,iwp->ijw", vext, self.dn)
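    # The einsum above builds the full polarizability tensor,
    #   p_mat[i, j, w] = sum_p vext[j, p] * dn[i, w, p],
    # i.e. the response along direction j to a perturbation applied along direction i.
    # An equivalent (slower) explicit loop, shown only for clarity:
    #
    #   for i in range(3):
    #       for j in range(3):
    #           for iw in range(comegas.size):
    #               self.p_mat[i, j, iw] = np.dot(vext[j], self.dn[i, iw, :])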
# -*- coding: utf-8 -*-
import copy
import datetime
import json
import logging
import os
import sys
import time
import traceback
import argparse
import kibana
from alerts import DebugAlerter
from config import get_rule_hashes
from config import load_configuration
from config import load_rules
from elasticsearch.client import Elasticsearch
from elasticsearch.exceptions import ElasticsearchException
from util import dt_to_ts
from util import EAException
from util import format_index
from util import pretty_ts
from util import replace_hits_ts
from util import seconds
from util import ts_add
from util import ts_now
from util import ts_to_dt
class ElastAlerter():
""" The main Elastalert runner. This class holds all state about active rules,
controls when queries are run, and passes information between rules and alerts.
:param args: An argparse arguments instance. Should contain debug and start
:param conf: The configuration dictionary. At the top level, this
contains global options, and under 'rules', contains all state relating
to rules and alerts. In each rule in conf['rules'], the RuleType and Alerter
instances live under 'type' and 'alerts', respectively. The conf dictionary
should not be passed directly from a configuration file, but must be populated
by config.py:load_rules instead. """
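    # A hedged sketch of the shape of `conf` described above (keys taken from the
    # attributes read in __init__; the concrete values are illustrative and the
    # real dictionary is produced by config.py:load_rules):
    #
    #   conf = {
    #       'es_host': 'localhost',
    #       'es_port': 9200,
    #       'writeback_index': 'elastalert_status',
    #       'max_query_size': 10000,
    #       'run_every': datetime.timedelta(minutes=5),
    #       'buffer_time': datetime.timedelta(minutes=45),
    #       'alert_time_limit': datetime.timedelta(days=2),
    #       'old_query_limit': datetime.timedelta(weeks=1),
    #       'rules': [{'name': 'example_frequency',
    #                  'index': 'logstash-*',
    #                  'timestamp_field': '@timestamp',
    #                  'filter': [],
    #                  'type': ...,      # a RuleType instance
    #                  'alert': [...]}]  # a list of Alerter instances
    #   }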
def parse_args(self, args):
parser = argparse.ArgumentParser()
parser.add_argument('--config', action='store', dest='config', default="config.yaml", help='Global config file (default: config.yaml)')
parser.add_argument('--debug', action='store_true', dest='debug', help='Suppresses alerts and prints information instead')
parser.add_argument('--rule', dest='rule', help='Run only a specific rule (by filename, must still be in rules folder)')
parser.add_argument('--silence', dest='silence', help='Silence rule for a time period. Must be used with --rule. Usage: '
'--silence <units>=<number>, eg. --silence hours=2')
parser.add_argument('--start', dest='start', help='YYYY-MM-DDTHH:MM:SS Start querying from this timestamp. (Default: present)')
parser.add_argument('--end', dest='end', help='YYYY-MM-DDTHH:MM:SS Query to this timestamp. (Default: present)')
parser.add_argument('--verbose', action='store_true', dest='verbose', help='Increase verbosity without suppressing alerts')
parser.add_argument('--pin_rules', action='store_true', dest='pin_rules', help='Stop ElastAlert from monitoring config file changes')
self.args = parser.parse_args(args)
def __init__(self, args):
self.parse_args(args)
self.conf = load_rules(self.args.config, use_rule=self.args.rule)
self.max_query_size = self.conf['max_query_size']
self.rules = self.conf['rules']
self.debug = self.args.debug
self.verbose = self.args.verbose
self.writeback_index = self.conf['writeback_index']
self.run_every = self.conf['run_every']
self.alert_time_limit = self.conf['alert_time_limit']
self.old_query_limit = self.conf['old_query_limit']
self.alerts_sent = 0
self.num_hits = 0
self.current_es = None
self.current_es_addr = None
self.buffer_time = self.conf['buffer_time']
self.silence_cache = {}
self.rule_hashes = get_rule_hashes(self.conf, self.args.rule)
self.starttime = self.args.start
self.es_conn_config = self.build_es_conn_config(self.conf)
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
if self.debug:
self.verbose = True
if self.verbose:
logging.getLogger().setLevel(logging.INFO)
for rule in self.rules:
rule = self.init_rule(rule)
if self.args.silence:
self.silence()
@staticmethod
def new_elasticsearch(es_conn_conf):
""" returns an Elasticsearch instance configured using an es_conn_config """
return Elasticsearch(host=es_conn_conf['es_host'],
port=es_conn_conf['es_port'],
use_ssl=es_conn_conf['use_ssl'],
http_auth=es_conn_conf['http_auth'])
@staticmethod
def build_es_conn_config(conf):
""" Given a conf dictionary w/ raw config properties 'use_ssl', 'es_host', 'es_port'
'es_username' and 'es_password', this will return a new dictionary
with properly initialized values for 'es_host', 'es_port', 'use_ssl' and 'http_auth' which
will be a basicauth username:password formatted string """
parsed_conf = {}
parsed_conf['use_ssl'] = False
parsed_conf['http_auth'] = None
parsed_conf['es_username'] = None
parsed_conf['es_password'] = None
parsed_conf['es_host'] = conf['es_host']
parsed_conf['es_port'] = conf['es_port']
if 'es_username' in conf:
parsed_conf['es_username'] = conf['es_username']
parsed_conf['es_password'] = conf['es_password']
if parsed_conf['es_username'] and parsed_conf['es_password']:
parsed_conf['http_auth'] = parsed_conf['es_username'] + ':' + parsed_conf['es_password']
if 'use_ssl' in conf:
parsed_conf['use_ssl'] = conf['use_ssl']
return parsed_conf
@staticmethod
def get_index(rule, starttime=None, endtime=None):
""" Gets the index for a rule. If strftime is set and starttime and endtime
        are provided, it will return a comma separated list of indices. If strftime
is set but starttime and endtime are not provided, it will replace all format
tokens with a wildcard. """
index = rule['index']
if rule.get('use_strftime_index'):
if starttime and endtime:
return format_index(index, starttime, endtime)
else:
# Replace the substring containing format characters with a *
format_start = index.find('%')
format_end = index.rfind('%') + 2
return index[:format_start] + '*' + index[format_end:]
else:
return index
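    # Illustrative example of the wildcard replacement above: with
    # use_strftime_index set and index = 'logstash-%Y.%m.%d' but no start/end time,
    # index.find('%') == 9 and index.rfind('%') + 2 == 17, so the method returns
    # 'logstash-' + '*' + '' == 'logstash-*'.  With start and end times it instead
    # delegates to format_index and returns a comma separated list of concrete
    # daily indices.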
@staticmethod
def get_query(filters, starttime=None, endtime=None, sort=True, timestamp_field='@timestamp'):
""" Returns a query dict that will apply a list of filters, filter by
start and end time, and sort results by timestamp.
:param filters: A list of elasticsearch filters to use.
:param starttime: A timestamp to use as the start time of the query.
:param endtime: A timestamp to use as the end time of the query.
:param sort: If true, sort results by timestamp. (Default True)
:return: A query dictionary to pass to elasticsearch.
"""
starttime = dt_to_ts(starttime)
endtime = dt_to_ts(endtime)
filters = copy.copy(filters)
query = {'filter': {'bool': {'must': filters}}}
if starttime and endtime:
query['filter']['bool']['must'].append({'range': {timestamp_field: {'from': starttime,
'to': endtime}}})
if sort:
query['sort'] = [{timestamp_field: {'order': 'asc'}}]
return query
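    # Illustrative shape of the dict returned above for a single term filter and a
    # time range (timestamps shown after dt_to_ts conversion; values hypothetical):
    #
    #   {'filter': {'bool': {'must': [
    #        {'term': {'status': 500}},
    #        {'range': {'@timestamp': {'from': '2014-01-01T00:00:00Z',
    #                                  'to': '2014-01-01T01:00:00Z'}}}]}},
    #    'sort': [{'@timestamp': {'order': 'asc'}}]}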
def get_terms_query(self, query, size, field):
""" Takes a query generated by get_query and outputs a aggregation query """
if 'sort' in query:
query.pop('sort')
query.update({'aggs': {'counts': {'terms': {'field': field, 'size': size}}}})
aggs_query = {'aggs': {'filtered': query}}
return aggs_query
def get_index_start(self, index, timestamp_field='@timestamp'):
""" Query for one result sorted by timestamp to find the beginning of the index.
:param index: The index of which to find the earliest event.
:return: Timestamp of the earliest event.
"""
query = {'sort': {timestamp_field: {'order': 'asc'}}}
try:
res = self.current_es.search(index=index, size=1, body=query, _source_include=[timestamp_field], ignore_unavailable=True)
except ElasticsearchException as e:
self.handle_error("Elasticsearch query error: %s" % (e), {'index': index})
return '1969-12-30T00:00:00Z'
if len(res['hits']['hits']) == 0:
# Index is completely empty, return a date before the epoch
return '1969-12-30T00:00:00Z'
timestamp = res['hits']['hits'][0]['_source'][timestamp_field]
return timestamp
def get_hits(self, rule, starttime, endtime, index):
""" Query elasticsearch for the given rule and return the results.
:param rule: The rule configuration.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A list of hits, bounded by self.max_query_size.
"""
query = self.get_query(rule['filter'], starttime, endtime, timestamp_field=rule['timestamp_field'])
try:
res = self.current_es.search(index=index, size=self.max_query_size, body=query, _source_include=rule['include'], ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running query: %s' % (e), {'rule': rule['name']})
return None
hits = res['hits']['hits']
self.num_hits += len(hits)
lt = rule.get('use_local_time')
logging.info("Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), len(hits)))
replace_hits_ts(hits, rule)
# Record doc_type for use in get_top_counts
if 'doc_type' not in rule and len(hits):
rule['doc_type'] = hits[0]['_type']
return hits
def get_hits_count(self, rule, starttime, endtime, index):
""" Query elasticsearch for the count of results and returns a list of timestamps
equal to the endtime. This allows the results to be passed to rules which expect
an object for each hit.
:param rule: The rule configuration dictionary.
:param starttime: The earliest time to query.
:param endtime: The latest time to query.
:return: A dictionary mapping timestamps to number of hits for that time period.
"""
query = self.get_query(rule['filter'], starttime, endtime, timestamp_field=rule['timestamp_field'], sort=False)
query = {'query': {'filtered': query}}
try:
res = self.current_es.count(index=index, doc_type=rule['doc_type'], body=query, ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running count query: %s' % (e), {'rule': rule['name']})
return None
self.num_hits += res['count']
lt = rule.get('use_local_time')
logging.info("Queried rule %s from %s to %s: %s hits" % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), res['count']))
return {endtime: res['count']}
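    # Illustrative return value: a rule queried from 10:00 to 10:05 that matched 42
    # documents yields {endtime: 42}, i.e. a single-entry dict keyed by the endtime
    # datetime, which count-based rules consume via add_count_data.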
def get_hits_terms(self, rule, starttime, endtime, index, key, qk=None, size=None):
rule_filter = copy.copy(rule['filter'])
if qk:
filter_key = rule['query_key']
if rule.get('raw_count_keys', True) and not rule['query_key'].endswith('.raw'):
filter_key += '.raw'
rule_filter.extend([{'term': {filter_key: qk}}])
base_query = self.get_query(rule_filter, starttime, endtime, timestamp_field=rule['timestamp_field'], sort=False)
if size is None:
size = rule.get('terms_size', 50)
query = self.get_terms_query(base_query, size, key)
try:
res = self.current_es.search(index=index, doc_type=rule['doc_type'], body=query, search_type='count', ignore_unavailable=True)
except ElasticsearchException as e:
# Elasticsearch sometimes gives us GIGANTIC error messages
# (so big that they will fill the entire terminal buffer)
if len(str(e)) > 1024:
e = str(e)[:1024] + '... (%d characters removed)' % (len(str(e)) - 1024)
self.handle_error('Error running query: %s' % (e), {'rule': rule['name']})
return None
if 'aggregations' not in res:
return {}
buckets = res['aggregations']['filtered']['counts']['buckets']
self.num_hits += len(buckets)
lt = rule.get('use_local_time')
logging.info('Queried rule %s from %s to %s: %s buckets' % (rule['name'], pretty_ts(starttime, lt), pretty_ts(endtime, lt), len(buckets)))
return {endtime: buckets}
def remove_duplicate_events(self, data, rule):
# Remove data we've processed already
data = [event for event in data if event['_id'] not in rule['processed_hits']]
# Remember the new data's IDs
for event in data:
rule['processed_hits'][event['_id']] = event['_source'][rule['timestamp_field']]
return [event['_source'] for event in data]
def remove_old_events(self, rule):
# Anything older than the buffer time we can forget
now = ts_now()
remove = []
buffer_time = rule.get('buffer_time', self.buffer_time)
for _id, timestamp in rule['processed_hits'].iteritems():
if now - timestamp > buffer_time:
remove.append(_id)
map(rule['processed_hits'].pop, remove)
def run_query(self, rule, start=None, end=None):
""" Query for the rule and pass all of the results to the RuleType instance.
:param rule: The rule configuration.
:param start: The earliest time to query.
:param end: The latest time to query.
Returns True on success and False on failure.
"""
if start is None:
start = self.get_index_start(rule['index'])
if end is None:
end = ts_now()
# Reset hit counter and query
rule_inst = rule['type']
prev_num_hits = self.num_hits
max_size = rule.get('max_query_size', self.max_query_size)
index = self.get_index(rule, start, end)
if rule.get('use_count_query'):
data = self.get_hits_count(rule, start, end, index)
elif rule.get('use_terms_query'):
data = self.get_hits_terms(rule, start, end, index, rule['query_key'])
else:
data = self.get_hits(rule, start, end, index)
if data:
data = self.remove_duplicate_events(data, rule)
# There was an exception while querying
if data is None:
return False
elif data:
if rule.get('use_count_query'):
rule_inst.add_count_data(data)
elif rule.get('use_terms_query'):
rule_inst.add_terms_data(data)
else:
rule_inst.add_data(data)
# Warn if we hit max_query_size
if self.num_hits - prev_num_hits == max_size and not rule.get('use_count_query'):
logging.warning("Hit max_query_size (%s) while querying for %s" % (max_size, rule['name']))
return True
def get_starttime(self, rule):
""" Query ES for the last time we ran this rule.
:param rule: The rule configuration.
:return: A timestamp or None.
"""
query = {'filter': {'term': {'rule_name': '%s' % (rule['name'])}},
'sort': {'@timestamp': {'order': 'desc'}}}
try:
if self.writeback_es:
res = self.writeback_es.search(index=self.writeback_index, doc_type='elastalert_status',
size=1, body=query, _source_include=['endtime', 'rule_name'])
if res['hits']['hits']:
endtime = ts_to_dt(res['hits']['hits'][0]['_source']['endtime'])
if ts_now() - endtime < self.old_query_limit:
return endtime
else:
logging.info("Found expired previous run for %s at %s" % (rule['name'], endtime))
return None
except (ElasticsearchException, KeyError) as e:
self.handle_error('Error querying for last run: %s' % (e), {'rule': rule['name']})
self.writeback_es = None
return None
def set_starttime(self, rule, endtime):
""" Given a rule and an endtime, sets the appropriate starttime for it. """
# This means we are starting fresh
if 'starttime' not in rule:
# Try to get the last run from elasticsearch
last_run_end = self.get_starttime(rule)
if last_run_end:
rule['starttime'] = last_run_end
return
# Use buffer for normal queries, or run_every increments otherwise
buffer_time = rule.get('buffer_time', self.buffer_time)
if not rule.get('use_count_query') and not rule.get('use_terms_query'):
rule['starttime'] = endtime - buffer_time
else:
rule['starttime'] = endtime - self.run_every
def run_rule(self, rule, endtime, starttime=None):
""" Run a rule for a given time period, including querying and alerting on results.
:param rule: The rule configuration.
:param starttime: The earliest timestamp to query.
:param endtime: The latest timestamp to query.
:return: The number of matches that the rule produced.
"""
run_start = time.time()
rule_es_conn_config = self.build_es_conn_config(rule)
self.current_es = self.new_elasticsearch(rule_es_conn_config)
self.current_es_addr = (rule['es_host'], rule['es_port'])
# If there are pending aggregate matches, try processing them
for x in range(len(rule['agg_matches'])):
match = rule['agg_matches'].pop()
self.add_aggregated_alert(match, rule)
# Start from provided time if it's given
if starttime:
rule['starttime'] = starttime
else:
self.set_starttime(rule, endtime)
rule['original_starttime'] = rule['starttime']
# Don't run if starttime was set to the future
if ts_now() <= rule['starttime']:
logging.warning("Attempted to use query start time in the future (%s), sleeping instead" % (starttime))
return 0
# Run the rule
# If querying over a large time period, split it up into chunks
self.num_hits = 0
buffer_time = rule.get('buffer_time', self.buffer_time)
while endtime - rule['starttime'] > buffer_time:
tmp_endtime = rule['starttime'] + self.run_every
if not self.run_query(rule, rule['starttime'], tmp_endtime):
return 0
rule['starttime'] = tmp_endtime
rule['type'].garbage_collect(tmp_endtime)
if not self.run_query(rule, rule['starttime'], endtime):
return 0
rule['type'].garbage_collect(endtime)
# Process any new matches
num_matches = len(rule['type'].matches)
while rule['type'].matches:
match = rule['type'].matches.pop(0)
# If realert is set, silence the rule for that duration
# Silence is cached by query_key, if it exists
# Default realert time is 0 seconds
# concatenate query_key (or none) with rule_name to form silence_cache key
if 'query_key' in rule:
try:
key = '.' + str(match[rule['query_key']])
except KeyError:
# Some matches may not have a query key
key = ''
else:
key = ''
if self.is_silenced(rule['name'] + key) or self.is_silenced(rule['name']):
logging.info('Ignoring match for silenced rule %s%s' % (rule['name'], key))
continue
if rule['realert']:
next_alert, exponent = self.next_alert_time(rule, rule['name'] + key, ts_now())
self.set_realert(rule['name'] + key, next_alert, exponent)
# If no aggregation, alert immediately
if not rule['aggregation']:
self.alert([match], rule)
continue
# Add it as an aggregated match
self.add_aggregated_alert(match, rule)
time_taken = time.time() - run_start
# Write to ES that we've run this rule against this time period
body = {'rule_name': rule['name'],
'endtime': endtime,
'starttime': rule['starttime'],
'matches': num_matches,
'hits': self.num_hits,
'@timestamp': ts_now(),
'time_taken': time_taken}
self.writeback('elastalert_status', body)
return num_matches
def init_rule(self, new_rule, new=True):
        ''' Copies some necessary non-config state from an existing rule to a new rule. '''
if 'download_dashboard' in new_rule['filter']:
# Download filters from kibana and set the rules filters to them
db_filters = self.filters_from_kibana(new_rule, new_rule['filter']['download_dashboard'])
if db_filters is not None:
new_rule['filter'] = db_filters
else:
raise EAException("Could not download filters from %s" % (new_rule['filter']['download_dashboard']))
blank_rule = {'agg_matches': [],
'current_aggregate_id': None,
'processed_hits': {}}
rule = blank_rule
# Set rule to either a blank template or existing rule with same name
if not new:
for rule in self.rules:
if rule['name'] == new_rule['name']:
break
else:
logging.warning("Couldn't find existing rule %s, starting from scratch" % (new_rule['name']))
rule = blank_rule
copy_properties = ['agg_matches',
'current_aggregate_id',
'processed_hits',
'starttime']
for prop in copy_properties:
if prop == 'starttime' and 'starttime' not in rule:
continue
new_rule[prop] = rule[prop]
return new_rule
def load_rule_changes(self):
''' Using the modification times of rule config files, syncs the running rules
to match the files in rules_folder by removing, adding or reloading rules. '''
rule_hashes = get_rule_hashes(self.conf, self.args.rule)
# Check each current rule for changes
for rule_file, hash_value in self.rule_hashes.iteritems():
if rule_file not in rule_hashes:
# Rule file was deleted
logging.info('Rule file %s not found, stopping rule execution' % (rule_file))
self.rules = [rule for rule in self.rules if rule['rule_file'] != rule_file]
continue
if hash_value != rule_hashes[rule_file]:
# Rule file was changed, reload rule
try:
new_rule = load_configuration(os.path.join(self.conf['rules_folder'], rule_file))
except EAException as e:
self.handle_error('Could not load rule %s: %s' % (rule_file, e))
continue
logging.info("Reloading configuration for rule %s" % (rule_file))
# Initialize the rule that matches rule_file
self.rules = [rule if rule['rule_file'] != rule_file else self.init_rule(new_rule, False) for rule in self.rules]
# Load new rules
if not self.args.rule:
for rule_file in set(rule_hashes.keys()) - set(self.rule_hashes.keys()):
try:
new_rule = load_configuration(os.path.join(self.conf['rules_folder'], rule_file))
except EAException as e:
self.handle_error('Could not load rule %s: %s' % (rule_file, e))
continue
logging.info('Loaded new rule %s' % (rule_file))
self.rules.append(self.init_rule(new_rule))
self.rule_hashes = rule_hashes
def start(self):
""" Periodically go through each rule and run it """
if self.starttime:
try:
self.starttime = ts_to_dt(self.starttime)
except (TypeError, ValueError):
self.handle_error("%s is not a valid ISO 8601 timestamp (YYYY-MM-DDTHH:MM:SS+XX:00)" % (self.starttime))
exit(1)
self.running = True
while self.running:
next_run = datetime.datetime.utcnow() + self.run_every
self.run_all_rules()
if next_run < datetime.datetime.utcnow():
continue
# Wait before querying again
sleep_duration = (next_run - datetime.datetime.utcnow()).seconds
self.sleep_for(sleep_duration)
def run_all_rules(self):
""" Run each rule one time """
# If writeback_es errored, it's disabled until the next query cycle
if not self.writeback_es:
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
self.send_pending_alerts()
next_run = datetime.datetime.utcnow() + self.run_every
for rule in self.rules:
# Set endtime based on the rule's delay
delay = rule.get('query_delay')
if hasattr(self.args, 'end') and self.args.end:
endtime = ts_to_dt(self.args.end)
elif delay:
endtime = ts_now() - delay
else:
endtime = ts_now()
try:
num_matches = self.run_rule(rule, endtime, self.starttime)
except EAException as e:
self.handle_error("Error running rule %s: %s" % (rule['name'], e), {'rule': rule['name']})
else:
old_starttime = pretty_ts(rule.get('original_starttime'), rule.get('use_local_time'))
logging.info("Ran %s from %s to %s: %s query hits, %s matches,"
" %s alerts sent" % (rule['name'], old_starttime, pretty_ts(endtime, rule.get('use_local_time')),
self.num_hits, num_matches, self.alerts_sent))
self.alerts_sent = 0
self.remove_old_events(rule)
if next_run < datetime.datetime.utcnow():
# We were processing for longer than our refresh interval
# This can happen if --start was specified with a large time period
# or if we are running too slow to process events in real time.
logging.warning("Querying from %s to %s took longer than %s!" % (old_starttime, endtime, self.run_every))
# Only force starttime once
self.starttime = None
if not self.args.pin_rules:
self.load_rule_changes()
def stop(self):
""" Stop an elastalert runner that's been started """
self.running = False
def sleep_for(self, duration):
""" Sleep for a set duration """
logging.info("Sleeping for %s seconds" % (duration))
time.sleep(duration)
def generate_kibana_db(self, rule, match):
''' Uses a template dashboard to upload a temp dashboard showing the match.
Returns the url to the dashboard. '''
db = copy.deepcopy(kibana.dashboard_temp)
# Set filters
for filter in rule['filter']:
if filter:
kibana.add_filter(db, filter)
kibana.set_included_fields(db, rule['include'])
# Set index
index = self.get_index(rule)
kibana.set_index_name(db, index)
return self.upload_dashboard(db, rule, match)
def upload_dashboard(self, db, rule, match):
''' Uploads a dashboard schema to the kibana-int elasticsearch index associated with rule.
Returns the url to the dashboard. '''
# Set time range
start = ts_add(match[rule['timestamp_field']], -rule.get('timeframe', datetime.timedelta(minutes=10)))
end = ts_add(match[rule['timestamp_field']], datetime.timedelta(minutes=10))
kibana.set_time(db, start, end)
# Set dashboard name
db_name = 'ElastAlert - %s - %s' % (rule['name'], end)
kibana.set_name(db, db_name)
# Add filter for query_key value
if 'query_key' in rule:
if rule['query_key'] in match:
term = {'term': {rule['query_key']: match[rule['query_key']]}}
kibana.add_filter(db, term)
# Convert to json
db_js = json.dumps(db)
db_body = {'user': 'guest',
'group': 'guest',
'title': db_name,
'dashboard': db_js}
# Upload
rule_es_conn_config = self.build_es_conn_config(rule)
es = self.new_elasticsearch(rule_es_conn_config)
res = es.create(index='kibana-int',
doc_type='temp',
body=db_body)
# Return dashboard URL
kibana_url = rule.get('kibana_dashboard')
if not kibana_url:
kibana_url = 'http://%s:%s/_plugin/kibana/' % (rule['es_host'],
rule['es_port'])
return kibana_url + '#/dashboard/temp/%s' % (res['_id'])
def get_dashboard(self, rule, db_name):
""" Download dashboard which matches use_kibana_dashboard from elasticsearch. """
rule_es_conn_config = self.build_es_conn_config(rule)
es = self.new_elasticsearch(rule_es_conn_config)
if not db_name:
raise EAException("use_kibana_dashboard undefined")
query = {'query': {'term': {'_id': db_name}}}
try:
res = es.search(index='kibana-int', doc_type='dashboard', body=query, _source_include=['dashboard'])
except ElasticsearchException as e:
raise EAException("Error querying for dashboard: %s" % (e))
if res['hits']['hits']:
return json.loads(res['hits']['hits'][0]['_source']['dashboard'])
else:
raise EAException("Could not find dashboard named %s" % (db_name))
def use_kibana_link(self, rule, match):
""" Uploads an existing dashboard as a temp dashboard modified for match time.
Returns the url to the dashboard. """
# Download or get cached dashboard
dashboard = rule.get('dashboard_schema')
if not dashboard:
db_name = rule.get('use_kibana_dashboard')
dashboard = self.get_dashboard(rule, db_name)
if dashboard:
rule['dashboard_schema'] = dashboard
else:
return None
dashboard = copy.deepcopy(dashboard)
return self.upload_dashboard(dashboard, rule, match)
def filters_from_kibana(self, rule, db_name):
""" Downloads a dashboard from kibana and returns corresponding filters, None on error. """
try:
db = rule.get('dashboard_schema')
if not db:
db = self.get_dashboard(rule, db_name)
filters = kibana.filters_from_dashboard(db)
except EAException:
return None
return filters
def alert(self, matches, rule, alert_time=None):
""" Send out an alert.
:param matches: A list of matches.
:param rule: A rule configuration.
"""
if alert_time is None:
alert_time = ts_now()
# Compute top count keys
if rule.get('top_count_keys'):
for match in matches:
if 'query_key' in rule and rule['query_key'] in match:
qk = match[rule['query_key']]
else:
qk = None
start = ts_to_dt(match[rule['timestamp_field']]) - rule.get('timeframe', datetime.timedelta(minutes=10))
end = ts_to_dt(match[rule['timestamp_field']]) + datetime.timedelta(minutes=10)
keys = rule.get('top_count_keys')
counts = self.get_top_counts(rule, start, end, keys, rule.get('top_count_number'), qk)
match.update(counts)
# Generate a kibana dashboard for the first match
if rule.get('generate_kibana_link') or rule.get('use_kibana_dashboard'):
try:
if rule.get('generate_kibana_link'):
kb_link = self.generate_kibana_db(rule, matches[0])
else:
kb_link = self.use_kibana_link(rule, matches[0])
except EAException as e:
self.handle_error("Could not generate kibana dash for %s match: %s" % (rule['name'], e))
else:
if kb_link:
matches[0]['kibana_link'] = kb_link
for enhancement in rule['match_enhancements']:
for match in matches:
try:
enhancement.process(match)
except EAException as e:
self.handle_error("Error running match enhancement: %s" % (e), {'rule': rule['name']})
# Don't send real alerts in debug mode
if self.debug:
alerter = DebugAlerter(rule)
alerter.alert(matches)
return
# Run the alerts
alert_sent = False
alert_exception = None
alert_pipeline = {}
for alert in rule['alert']:
# Alert.pipeline is a single object shared between every alerter
# This allows alerters to pass objects and data between themselves
alert.pipeline = alert_pipeline
try:
alert.alert(matches)
except EAException as e:
self.handle_error('Error while running alert %s: %s' % (alert.get_info()['type'], e), {'rule': rule['name']})
alert_exception = str(e)
else:
self.alerts_sent += 1
alert_sent = True
# Write the alert(s) to ES
agg_id = None
for match in matches:
alert_body = self.get_alert_body(match, rule, alert_sent, alert_time, alert_exception)
# Set all matches to aggregate together
if agg_id:
alert_body['aggregate_id'] = agg_id
res = self.writeback('elastalert', alert_body)
if res and not agg_id:
agg_id = res['_id']
def get_alert_body(self, match, rule, alert_sent, alert_time, alert_exception=None):
body = {'match_body': match}
body['rule_name'] = rule['name']
# TODO record info about multiple alerts
body['alert_info'] = rule['alert'][0].get_info()
body['alert_sent'] = alert_sent
body['alert_time'] = alert_time
# If the alert failed to send, record the exception
if not alert_sent:
body['alert_exception'] = alert_exception
return body
def writeback(self, doc_type, body):
# Convert any datetime objects to timestamps
for key in body.keys():
if isinstance(body[key], datetime.datetime):
body[key] = dt_to_ts(body[key])
if self.debug:
logging.info("Skipping writing to ES: %s" % (body))
return None
if '@timestamp' not in body:
body['@timestamp'] = dt_to_ts(ts_now())
if self.writeback_es:
try:
res = self.writeback_es.create(index=self.writeback_index,
doc_type=doc_type, body=body)
return res
except ElasticsearchException as e:
logging.exception("Error writing alert info to elasticsearch: %s" % (e))
self.writeback_es = None
return None
def find_recent_pending_alerts(self, time_limit):
""" Queries writeback_es to find alerts that did not send
and are newer than time_limit """
query = {'query': {'query_string': {'query': 'alert_sent:false'}},
'filter': {'range': {'alert_time': {'from': dt_to_ts(ts_now() - time_limit),
'to': dt_to_ts(ts_now())}}}}
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index,
doc_type='elastalert',
body=query,
size=1000)
if res['hits']['hits']:
return res['hits']['hits']
except:
pass
return []
def send_pending_alerts(self):
pending_alerts = self.find_recent_pending_alerts(self.alert_time_limit)
for alert in pending_alerts:
_id = alert['_id']
alert = alert['_source']
try:
rule_name = alert.pop('rule_name')
alert_time = alert.pop('alert_time')
match_body = alert.pop('match_body')
except KeyError:
# Malformed alert, drop it
continue
agg_id = alert.get('aggregate_id', None)
if agg_id:
# Aggregated alerts will be taken care of by get_aggregated_matches
continue
# Find original rule
for rule in self.rules:
if rule['name'] == rule_name:
break
else:
# Original rule is missing, drop alert
continue
# Retry the alert unless it's a future alert
if ts_now() > ts_to_dt(alert_time):
aggregated_matches = self.get_aggregated_matches(_id)
if aggregated_matches:
matches = [match_body] + [agg_match['match_body'] for agg_match in aggregated_matches]
self.alert(matches, rule, alert_time=alert_time)
rule['current_aggregate_id'] = None
else:
self.alert([match_body], rule, alert_time=alert_time)
# Delete it from the index
try:
self.writeback_es.delete(index=self.writeback_index,
doc_type='elastalert',
id=_id)
except:
self.handle_error("Failed to delete alert %s at %s" % (_id, alert_time))
# Send in memory aggregated alerts
for rule in self.rules:
if rule['agg_matches']:
if ts_now() > rule['aggregate_alert_time']:
self.alert(rule['agg_matches'], rule)
rule['agg_matches'] = []
def get_aggregated_matches(self, _id):
""" Removes and returns all matches from writeback_es that have aggregate_id == _id """
query = {'query': {'query_string': {'query': 'aggregate_id:%s' % (_id)}}}
matches = []
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index,
doc_type='elastalert',
body=query)
for match in res['hits']['hits']:
matches.append(match['_source'])
self.writeback_es.delete(index=self.writeback_index,
doc_type='elastalert',
id=match['_id'])
except (KeyError, ElasticsearchException) as e:
self.handle_error("Error fetching aggregated matches: %s" % (e), {'id': _id})
return matches
def add_aggregated_alert(self, match, rule):
""" Save a match as a pending aggregate alert to elasticsearch. """
if not rule['current_aggregate_id'] or rule['aggregate_alert_time'] < ts_to_dt(match[rule['timestamp_field']]):
# First match, set alert_time
match_time = ts_to_dt(match[rule['timestamp_field']])
alert_time = match_time + rule['aggregation']
rule['aggregate_alert_time'] = alert_time
agg_id = None
else:
# Already pending aggregation, use existing alert_time
alert_time = rule['aggregate_alert_time']
agg_id = rule['current_aggregate_id']
logging.info('Adding alert for %s to aggregation, next alert at %s' % (rule['name'], alert_time))
alert_body = self.get_alert_body(match, rule, False, alert_time)
if agg_id:
alert_body['aggregate_id'] = agg_id
res = self.writeback('elastalert', alert_body)
# If new aggregation, save _id
if res and not agg_id:
rule['current_aggregate_id'] = res['_id']
# Couldn't write the match to ES, save it in memory for now
if not res:
rule['agg_matches'].append(match)
return res
def silence(self):
""" Silence an alert for a period of time. --silence and --rule must be passed as args. """
if self.debug:
logging.error('--silence not compatible with --debug')
exit(1)
if not self.args.rule:
logging.error('--silence must be used with --rule')
exit(1)
# With --rule, self.rules will only contain that specific rule
rule_name = self.rules[0]['name']
try:
unit, num = self.args.silence.split('=')
silence_time = datetime.timedelta(**{unit: int(num)})
# Double conversion to add tzinfo
silence_ts = ts_to_dt(dt_to_ts(silence_time + datetime.datetime.utcnow()))
except (ValueError, TypeError):
logging.error('%s is not a valid time period' % (self.args.silence))
exit(1)
if not self.set_realert(rule_name, silence_ts, 0):
logging.error('Failed to save silence command to elasticsearch')
exit(1)
logging.info('Success. %s will be silenced until %s' % (rule_name, silence_ts))
def set_realert(self, rule_name, timestamp, exponent):
""" Write a silence to elasticsearch for rule_name until timestamp. """
body = {'exponent': exponent,
'rule_name': rule_name,
'@timestamp': ts_now(),
'until': timestamp}
self.silence_cache[rule_name] = (timestamp, exponent)
return self.writeback('silence', body)
def is_silenced(self, rule_name):
""" Checks if rule_name is currently silenced. Returns false on exception. """
if rule_name in self.silence_cache:
if ts_now() < self.silence_cache[rule_name][0]:
return True
else:
return False
if self.debug:
return False
query = {'filter': {'term': {'rule_name': rule_name}},
'sort': {'until': {'order': 'desc'}}}
if self.writeback_es:
try:
res = self.writeback_es.search(index=self.writeback_index, doc_type='silence',
size=1, body=query, _source_include=['until', 'exponent'])
except ElasticsearchException as e:
self.handle_error("Error while querying for alert silence status: %s" % (e), {'rule': rule_name})
return False
if res['hits']['hits']:
until_ts = res['hits']['hits'][0]['_source']['until']
exponent = res['hits']['hits'][0]['_source'].get('exponent', 0)
self.silence_cache[rule_name] = (ts_to_dt(until_ts), exponent)
if ts_now() < ts_to_dt(until_ts):
return True
return False
def handle_error(self, message, data=None):
''' Logs message at error level and writes message, data and traceback to Elasticsearch. '''
if not self.writeback_es:
self.writeback_es = self.new_elasticsearch(self.es_conn_config)
logging.error(message)
body = {'message': message}
tb = traceback.format_exc()
body['traceback'] = tb.strip().split('\n')
if data:
body['data'] = data
self.writeback('elastalert_error', body)
def get_top_counts(self, rule, starttime, endtime, keys, number=5, qk=None):
""" Counts the number of events for each unique value for each key field.
Returns a dictionary with top_events_<key> mapped to the top 5 counts for each key. """
all_counts = {}
for key in keys:
index = self.get_index(rule, starttime, endtime)
buckets = self.get_hits_terms(rule, starttime, endtime, index, key, qk, number).values()[0]
# get_hits_terms adds to num_hits, but we don't want to count these
self.num_hits -= len(buckets)
terms = {}
for bucket in buckets:
terms[bucket['key']] = bucket['doc_count']
counts = terms.items()
counts.sort(key=lambda x: x[1], reverse=True)
# Save a dict with the top 5 events by key
all_counts['top_events_%s' % (key)] = dict(counts[:number])
return all_counts
def next_alert_time(self, rule, name, timestamp):
""" Calculate an 'until' time and exponent based on how much past the last 'until' we are. """
if name in self.silence_cache:
last_until, exponent = self.silence_cache[name]
else:
# If this isn't cached, this is the first alert or writeback_es is down, normal realert
return timestamp + rule['realert'], 0
if not rule.get('exponential_realert'):
return timestamp + rule['realert'], 0
diff = seconds(timestamp - last_until)
# Increase exponent if we've alerted recently
if diff < seconds(rule['realert']) * 2 ** exponent:
exponent += 1
else:
# Continue decreasing exponent the longer it's been since the last alert
while diff > seconds(rule['realert']) * 2 ** exponent and exponent > 0:
diff -= seconds(rule['realert']) * 2 ** exponent
exponent -= 1
wait = datetime.timedelta(seconds=seconds(rule['realert']) * 2 ** exponent)
if wait >= rule['exponential_realert']:
return timestamp + rule['exponential_realert'], exponent - 1
return timestamp + wait, exponent
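    # Worked example of the backoff above (illustrative numbers): with
    # realert = 5 minutes and exponential_realert = 1 hour, a rule whose matches
    # keep arriving shortly after each silence period expires is silenced for
    # 5, 10, 20 and then 40 minutes, after which the wait is capped at 1 hour;
    # once matches stop arriving for longer than the current period, the loop in
    # the else branch walks the exponent back down toward the base 5 minutes.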
if __name__ == '__main__':
client = ElastAlerter(sys.argv[1:])
if not client.args.silence:
client.start()
# -*- coding: utf-8 -*-
'''
This module contains code to create XMLContent objects from the Weblyzard API
JSON format.
.. moduleauthor:: Fabian Fischer [email protected]
'''
from __future__ import unicode_literals
from weblyzard_api.model.parsers.xml_2013 import XML2013
from weblyzard_api.model.parsers import JSONParserBase
from weblyzard_api.model.exceptions import MalformedJSONException
from weblyzard_api.model.xml_content import XMLContent
from weblyzard_api.model import Sentence
class JSON10ParserXMLContent(JSONParserBase):
'''
This class is the parser class for JSON documents conforming to
the Weblyzard API 1.0 definition.
'''
FIELDS_REQUIRED = ['uri']
FIELDS_OPTIONAL = ['title', 'language_id', 'sentences', 'content',
'features', 'relations', 'confidence'] \
+list(XML2013.ATTR_MAPPING.keys())
API_VERSION = 1.0
@classmethod
def from_api_dict(cls, api_dict):
'''
        Parses a dict with a structure analogous to the JSON format defined
in the API specification.
:param api_dict: The document to parse.
:type api_dict: dict
:returns: The parsed document as XMLContent object.
:rtype: :py:class:`weblyzard_api.model.xml_content.XMLContent`
'''
cls._check_document_format(api_dict, strict=True)
# This basically creates an empty XMLContent object
xml_content = XMLContent(xml_content=None, remove_duplicates=True)
# add all items in api_dict unless they need special handling
xml_content.update_attributes({key: value for key, value in api_dict.items() if
key not in ('sentences', 'annotations',
'language_id', 'features',
'relations', 'content')})
# parse sentences
sentences = [JSON10ParserSentence.from_api_dict(sentence_dict) for
sentence_dict in api_dict.get('sentences', [])]
xml_content.sentences = sentences
# parse annotations
annotations = [JSON10ParserAnnotation.from_api_dict(annotation_dict) for
annotation_dict in api_dict.get('annotations', [])]
xml_content.body_annotations = annotations
# add relations and features
xml_content.relations = api_dict.get('relations', {})
xml_content.features = api_dict.get('features', {})
# map the language_id to XMLContent.lang
if 'language_id' in api_dict:
xml_content.attributes['lang'] = api_dict['language_id']
# removed this: title is already set via attributes
if 'title' in api_dict:
for sentence in sentences:
if sentence.is_title and sentence.value != api_dict['title']:
raise MalformedJSONException('The sentence marked with "is_title": "True" must ' +
'match the "title" attribute.')
else:
for sentence in sentences:
if sentence.is_title:
api_dict['title'] = sentence.value
return xml_content
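    # A hedged sketch of an api_dict accepted by from_api_dict above (field names
    # taken from FIELDS_REQUIRED/FIELDS_OPTIONAL; the values are purely illustrative):
    #
    #   api_dict = {
    #       'uri': 'http://example.com/doc/1',
    #       'title': 'Example document',
    #       'language_id': 'en',
    #       'content': 'Example document Some more text.',
    #       'sentences': [{'id': 'a1b2...', 'value': 'Example document', 'is_title': True},
    #                     {'id': 'c3d4...', 'value': 'Some more text.'}],
    #       'features': {},
    #       'relations': {},
    #   }
    #   xml_content = JSON10ParserXMLContent.from_api_dict(api_dict)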
class JSON10ParserSentence(JSONParserBase):
'''
This class is the parser class for JSON sentences conforming to
the Weblyzard API 1.0 definition.
'''
FIELDS_REQUIRED = ['id', 'value']
FIELDS_OPTIONAL = ['is_title', 'pos_list', 'tok_list', 'dep_tree',
'sentence_number', 'paragraph_number', 'polarity',
'polarity_class', 'significance']
API_VERSION = 1.0
@classmethod
def from_api_dict(cls, api_dict):
'''
        Parses a dict with a structure analogous to the JSON format defined
in the API specification.
:param api_dict: The document to parse.
:type api_dict: dict
:returns: The parsed document as XMLContent object.
:rtype: :py:class:`weblyzard_api.model.xml_content.Sentence`
'''
cls._check_document_format(api_dict)
sentence = Sentence(
md5sum=api_dict['id'],
value=api_dict['value'],
pos=api_dict.get('pos_list', None),
sem_orient=api_dict.get('polarity', None),
significance=0.0,
token=api_dict.get('tok_list', None),
is_title=api_dict.get('is_title', False),
dependency=api_dict.get('dep_tree', None))
return sentence
class JSON10ParserAnnotation(JSONParserBase):
'''
This class is the parser class for JSON annotations conforming to
the Weblyzard API 1.0 definition.
'''
FIELDS_REQUIRED = ['start', 'end', 'surface_form', 'annotation_type']
FIELDS_OPTIONAL = ['key', 'sentence', 'confidence', 'md5sum', 'entityType',
'score', 'profileName', 'type', 'preferredName', 'surfaceForm',
'display_name', 'polarity', 'properties']
API_VERSION = 1.0
@classmethod
def from_api_dict(cls, api_dict):
'''
        Parses a dict with a structure analogous to the JSON annotation
format defined in the API specification.
For now, it just checks the dict and returns it, if it validates.
:param api_dict: The document to parse.
:type api_dict: dict
:returns: The parsed annotation as dict
:rtype: dict
'''
cls._check_document_format(api_dict)
result = dict(api_dict)
del result['annotation_type']
return result
@classmethod
def to_api_dict(cls, annotation_type, annotation):
'''
This method simply puts the annotation_type within
the annotation dict again.
:param annotation_type: The type of annotation
:type annotation_type: str
:param annotation: The annotation data
:type annotation: dict
:returns: the annotation with annotation_type set
:rtype: dict
'''
result = dict(annotation)
if 'annotationType' in result:
del result['annotationType']
result['annotation_type'] = annotation_type
return result
@classmethod
def _normalize_compact_form(cls, api_list):
result = []
for annotation in api_list:
if 'entities' in annotation:
for entity in annotation['entities']:
new_annotation = dict(annotation)
new_annotation.update(entity)
del new_annotation['entities']
if 'surfaceForm' in new_annotation:
new_annotation['surface_form'] = new_annotation['surfaceForm']
del new_annotation['surfaceForm']
result.append(new_annotation)
else:
result.append(annotation)
return result
@classmethod
def from_api_list(cls, api_list):
'''
Parses a list of annotations and returns a dict mapping the
annotations to their annotation type. I.e. each annotation
in the list individually states its type and in the output
        dict this type is the key and the values are the individual
annotations of this type. E.g.
>>> api_list = [{'start': 87, \
'end': 101, \
'surface_form': 'Public Service',\
'annotation_type': 'OrganizationEntity'}]
>>> JSON10ParserAnnotation.from_api_list(api_list)
{'OrganizationEntity': [{'start': 87, 'surface_form': 'Public Service', 'end': 101}]}
:param api_list: A list of annotations.
:type api_list: list
:returns: a nested dict with the annotation types as key \
and a list of annotations as the value.
:rtype: dict
'''
result = {}
api_list = cls._normalize_compact_form(api_list)
for annotation in api_list:
cls._check_document_format(annotation)
result.setdefault(annotation['annotation_type'], [])
result.setdefault(annotation['annotation_type'], []).append(
JSON10ParserAnnotation.from_api_dict(annotation))
return result
@classmethod
def to_api_list(cls, annotations):
'''
Takes a dict that nests a list of annotations in their annotation_type
and returns a flat list of annotations where each has its
annotation_type set individually.
>>> annotations = {'OrganizationEntity': [{'start': 87, 'surface_form': 'Public Service', 'end': 101}]}
>>> JSON10ParserAnnotation.to_api_list(annotations)
[{'start': 87, 'surface_form': 'Public Service', 'end': 101, 'annotation_type': 'OrganizationEntity'}]
:param annotations: The nested dict mapping annotation_type to a list
:type annotations: dict
:returns: The flat list of annotations.
:rtype: list
'''
result = []
if not annotations:
return result
for annotation_type in annotations:
for annotation in annotations[annotation_type]:
result.append(JSON10ParserAnnotation.to_api_dict(annotation_type,
annotation))
return result
# Copyright (c) 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manage hosts in the current zone.
"""
import collections
import UserDict
from oslo.config import cfg
from nova.compute import task_states
from nova.compute import vm_states
from nova import db
from nova import exception
from nova.openstack.common.gettextutils import _
from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova.pci import pci_request
from nova.pci import pci_stats
from nova.scheduler import filters
from nova.scheduler import weights
host_manager_opts = [
cfg.MultiStrOpt('scheduler_available_filters',
default=['nova.scheduler.filters.all_filters'],
help='Filter classes available to the scheduler which may '
'be specified more than once. An entry of '
'"nova.scheduler.filters.standard_filters" '
'maps to all filters included with nova.'),
cfg.ListOpt('scheduler_default_filters',
default=[
'RetryFilter',
'AvailabilityZoneFilter',
'RamFilter',
'ComputeFilter',
'ComputeCapabilitiesFilter',
'ImagePropertiesFilter'
],
help='Which filter class names to use for filtering hosts '
'when not specified in the request.'),
cfg.ListOpt('scheduler_weight_classes',
default=['nova.scheduler.weights.all_weighers'],
help='Which weight class names to use for weighing hosts'),
]
CONF = cfg.CONF
CONF.register_opts(host_manager_opts)
LOG = logging.getLogger(__name__)
class ReadOnlyDict(UserDict.IterableUserDict):
"""A read-only dict."""
def __init__(self, source=None):
self.data = {}
self.update(source)
def __setitem__(self, key, item):
raise TypeError()
def __delitem__(self, key):
raise TypeError()
def clear(self):
raise TypeError()
def pop(self, key, *args):
raise TypeError()
def popitem(self):
raise TypeError()
def update(self, source=None):
if source is None:
return
elif isinstance(source, UserDict.UserDict):
self.data = source.data
elif isinstance(source, type({})):
self.data = source
else:
raise TypeError()
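# Illustrative behaviour of ReadOnlyDict (a hedged sketch, values hypothetical):
#
#   caps = ReadOnlyDict({'hypervisor_type': 'kvm'})
#   caps['hypervisor_type']            # -> 'kvm'; reads behave like a normal dict
#   caps['hypervisor_type'] = 'xen'    # -> raises TypeError; all mutators are disabled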
# Representation of a single metric value from a compute node.
MetricItem = collections.namedtuple(
'MetricItem', ['value', 'timestamp', 'source'])
class HostState(object):
"""Mutable and immutable information tracked for a host.
This is an attempt to remove the ad-hoc data structures
previously used and lock down access.
"""
def __init__(self, host, node, capabilities=None, service=None):
self.host = host
self.nodename = node
self.update_capabilities(capabilities, service)
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.total_usable_disk_gb = 0
self.disk_mb_used = 0
self.free_ram_mb = 0
self.free_disk_mb = 0
self.vcpus_total = 0
self.vcpus_used = 0
# Additional host information from the compute node stats:
self.vm_states = {}
self.task_states = {}
self.num_instances = 0
self.num_instances_by_project = {}
self.num_instances_by_os_type = {}
self.num_io_ops = 0
# Other information
self.host_ip = None
self.hypervisor_type = None
self.hypervisor_version = None
self.hypervisor_hostname = None
self.cpu_info = None
self.supported_instances = None
# Resource oversubscription values for the compute host:
self.limits = {}
# Generic metrics from compute nodes
self.metrics = {}
self.updated = None
def update_capabilities(self, capabilities=None, service=None):
# Read-only capability dicts
if capabilities is None:
capabilities = {}
self.capabilities = ReadOnlyDict(capabilities)
if service is None:
service = {}
self.service = ReadOnlyDict(service)
def _update_metrics_from_compute_node(self, compute):
#NOTE(llu): The 'or []' is to avoid json decode failure of None
# returned from compute.get, because DB schema allows
# NULL in the metrics column
metrics = compute.get('metrics', []) or []
if metrics:
metrics = jsonutils.loads(metrics)
for metric in metrics:
# 'name', 'value', 'timestamp' and 'source' are all required
# to be valid keys, just let KeyError happen if any one of
# them is missing. But we also require 'name' to be True.
name = metric['name']
item = MetricItem(value=metric['value'],
timestamp=metric['timestamp'],
source=metric['source'])
if name:
self.metrics[name] = item
else:
LOG.warn(_("Metric name unknown of %r") % item)
def update_from_compute_node(self, compute):
"""Update information about a host from its compute_node info."""
if (self.updated and compute['updated_at']
and self.updated > compute['updated_at']):
return
all_ram_mb = compute['memory_mb']
# Assume virtual size is all consumed by instances if use qcow2 disk.
least = compute.get('disk_available_least')
free_disk_mb = least if least is not None else compute['free_disk_gb']
free_disk_mb *= 1024
self.disk_mb_used = compute['local_gb_used'] * 1024
#NOTE(jogo) free_ram_mb can be negative
self.free_ram_mb = compute['free_ram_mb']
self.total_usable_ram_mb = all_ram_mb
self.total_usable_disk_gb = compute['local_gb']
self.free_disk_mb = free_disk_mb
self.vcpus_total = compute['vcpus']
self.vcpus_used = compute['vcpus_used']
self.updated = compute['updated_at']
if 'pci_stats' in compute:
self.pci_stats = pci_stats.PciDeviceStats(compute['pci_stats'])
else:
self.pci_stats = None
# All virt drivers report host_ip
self.host_ip = compute['host_ip']
self.hypervisor_type = compute.get('hypervisor_type')
self.hypervisor_version = compute.get('hypervisor_version')
self.hypervisor_hostname = compute.get('hypervisor_hostname')
self.cpu_info = compute.get('cpu_info')
if compute.get('supported_instances'):
self.supported_instances = jsonutils.loads(
compute.get('supported_instances'))
# Don't store stats directly in host_state to make sure these don't
# overwrite any values, or get overwritten themselves. Store in self so
# filters can schedule with them.
self.stats = self._statmap(compute.get('stats', []))
self.hypervisor_version = compute['hypervisor_version']
# Track number of instances on host
self.num_instances = int(self.stats.get('num_instances', 0))
# Track number of instances by project_id
project_id_keys = [k for k in self.stats.keys() if
k.startswith("num_proj_")]
for key in project_id_keys:
project_id = key[9:]
self.num_instances_by_project[project_id] = int(self.stats[key])
# Track number of instances in certain vm_states
vm_state_keys = [k for k in self.stats.keys() if
k.startswith("num_vm_")]
for key in vm_state_keys:
vm_state = key[7:]
self.vm_states[vm_state] = int(self.stats[key])
# Track number of instances in certain task_states
task_state_keys = [k for k in self.stats.keys() if
k.startswith("num_task_")]
for key in task_state_keys:
task_state = key[9:]
self.task_states[task_state] = int(self.stats[key])
# Track number of instances by host_type
os_keys = [k for k in self.stats.keys() if
k.startswith("num_os_type_")]
for key in os_keys:
os = key[12:]
self.num_instances_by_os_type[os] = int(self.stats[key])
self.num_io_ops = int(self.stats.get('io_workload', 0))
# update metrics
self._update_metrics_from_compute_node(compute)
def consume_from_instance(self, instance):
"""Incrementally update host state from an instance."""
disk_mb = (instance['root_gb'] + instance['ephemeral_gb']) * 1024
ram_mb = instance['memory_mb']
vcpus = instance['vcpus']
self.free_ram_mb -= ram_mb
self.free_disk_mb -= disk_mb
self.vcpus_used += vcpus
self.updated = timeutils.utcnow()
# Track number of instances on host
self.num_instances += 1
# Track number of instances by project_id
project_id = instance.get('project_id')
if project_id not in self.num_instances_by_project:
self.num_instances_by_project[project_id] = 0
self.num_instances_by_project[project_id] += 1
# Track number of instances in certain vm_states
vm_state = instance.get('vm_state', vm_states.BUILDING)
if vm_state not in self.vm_states:
self.vm_states[vm_state] = 0
self.vm_states[vm_state] += 1
# Track number of instances in certain task_states
task_state = instance.get('task_state')
if task_state not in self.task_states:
self.task_states[task_state] = 0
self.task_states[task_state] += 1
# Track number of instances by host_type
os_type = instance.get('os_type')
if os_type not in self.num_instances_by_os_type:
self.num_instances_by_os_type[os_type] = 0
self.num_instances_by_os_type[os_type] += 1
pci_requests = pci_request.get_instance_pci_requests(instance)
if pci_requests and self.pci_stats:
self.pci_stats.apply_requests(pci_requests)
vm_state = instance.get('vm_state', vm_states.BUILDING)
task_state = instance.get('task_state')
if vm_state == vm_states.BUILDING or task_state in [
task_states.RESIZE_MIGRATING, task_states.REBUILDING,
task_states.RESIZE_PREP, task_states.IMAGE_SNAPSHOT,
task_states.IMAGE_BACKUP]:
self.num_io_ops += 1
def _statmap(self, stats):
return dict((st['key'], st['value']) for st in stats)
def __repr__(self):
return ("(%s, %s) ram:%s disk:%s io_ops:%s instances:%s" %
(self.host, self.nodename, self.free_ram_mb, self.free_disk_mb,
self.num_io_ops, self.num_instances))
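# Illustrative sketch (not part of the scheduler): update_from_compute_node()
# decodes flat stats entries by key prefix -- "num_proj_<project_id>",
# "num_vm_<vm_state>", "num_task_<task_state>", "num_os_type_<os_type>".
# The helper below only shows how such a stats payload folds into per-key
# counters; the values are made up.
def _example_decode_stats():
    """Sketch: fold a compute node 'stats' list into prefixed counters."""
    stats = [{'key': 'num_instances', 'value': '3'},
             {'key': 'num_proj_abc123', 'value': '2'},
             {'key': 'num_vm_active', 'value': '3'},
             {'key': 'num_task_spawning', 'value': '1'},
             {'key': 'num_os_type_linux', 'value': '3'},
             {'key': 'io_workload', 'value': '1'}]
    statmap = dict((st['key'], st['value']) for st in stats)
    num_instances_by_project = dict(
        (k[len('num_proj_'):], int(v))
        for k, v in statmap.items() if k.startswith('num_proj_'))
    vm_state_counts = dict(
        (k[len('num_vm_'):], int(v))
        for k, v in statmap.items() if k.startswith('num_vm_'))
    return (int(statmap.get('num_instances', 0)),
            num_instances_by_project, vm_state_counts)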
class HostManager(object):
"""Base HostManager class."""
# Can be overridden in a subclass
host_state_cls = HostState
def __init__(self):
# { (host, hypervisor_hostname) : { <service> : { cap k : v }}}
self.service_states = {}
self.host_state_map = {}
self.filter_handler = filters.HostFilterHandler()
self.filter_classes = self.filter_handler.get_matching_classes(
CONF.scheduler_available_filters)
self.weight_handler = weights.HostWeightHandler()
self.weight_classes = self.weight_handler.get_matching_classes(
CONF.scheduler_weight_classes)
def _choose_host_filters(self, filter_cls_names):
"""Since the caller may specify which filters to use we need
to have an authoritative list of what is permissible. This
function checks the filter names against a predefined set
of acceptable filters.
"""
if filter_cls_names is None:
filter_cls_names = CONF.scheduler_default_filters
if not isinstance(filter_cls_names, (list, tuple)):
filter_cls_names = [filter_cls_names]
cls_map = dict((cls.__name__, cls) for cls in self.filter_classes)
good_filters = []
bad_filters = []
for filter_name in filter_cls_names:
if filter_name not in cls_map:
bad_filters.append(filter_name)
continue
good_filters.append(cls_map[filter_name])
if bad_filters:
msg = ", ".join(bad_filters)
raise exception.SchedulerHostFilterNotFound(filter_name=msg)
return good_filters
def get_filtered_hosts(self, hosts, filter_properties,
filter_class_names=None, index=0):
"""Filter hosts and return only ones passing all filters."""
def _strip_ignore_hosts(host_map, hosts_to_ignore):
ignored_hosts = []
for host in hosts_to_ignore:
for (hostname, nodename) in host_map.keys():
if host == hostname:
del host_map[(hostname, nodename)]
ignored_hosts.append(host)
ignored_hosts_str = ', '.join(ignored_hosts)
msg = _('Host filter ignoring hosts: %s')
LOG.audit(msg % ignored_hosts_str)
def _match_forced_hosts(host_map, hosts_to_force):
forced_hosts = []
for (hostname, nodename) in host_map.keys():
if hostname not in hosts_to_force:
del host_map[(hostname, nodename)]
else:
forced_hosts.append(hostname)
if host_map:
forced_hosts_str = ', '.join(forced_hosts)
msg = _('Host filter forcing available hosts to %s')
else:
forced_hosts_str = ', '.join(hosts_to_force)
msg = _("No hosts matched due to not matching "
"'force_hosts' value of '%s'")
LOG.audit(msg % forced_hosts_str)
def _match_forced_nodes(host_map, nodes_to_force):
forced_nodes = []
for (hostname, nodename) in host_map.keys():
if nodename not in nodes_to_force:
del host_map[(hostname, nodename)]
else:
forced_nodes.append(nodename)
if host_map:
forced_nodes_str = ', '.join(forced_nodes)
msg = _('Host filter forcing available nodes to %s')
else:
forced_nodes_str = ', '.join(nodes_to_force)
msg = _("No nodes matched due to not matching "
"'force_nodes' value of '%s'")
LOG.audit(msg % forced_nodes_str)
filter_classes = self._choose_host_filters(filter_class_names)
ignore_hosts = filter_properties.get('ignore_hosts', [])
force_hosts = filter_properties.get('force_hosts', [])
force_nodes = filter_properties.get('force_nodes', [])
if ignore_hosts or force_hosts or force_nodes:
# NOTE(deva): we can't assume "host" is unique because
# one host may have many nodes.
name_to_cls_map = dict([((x.host, x.nodename), x) for x in hosts])
if ignore_hosts:
_strip_ignore_hosts(name_to_cls_map, ignore_hosts)
if not name_to_cls_map:
return []
# NOTE(deva): allow force_hosts and force_nodes independently
if force_hosts:
_match_forced_hosts(name_to_cls_map, force_hosts)
if force_nodes:
_match_forced_nodes(name_to_cls_map, force_nodes)
if force_hosts or force_nodes:
# NOTE(deva): Skip filters when forcing host or node
if name_to_cls_map:
return name_to_cls_map.values()
hosts = name_to_cls_map.itervalues()
return self.filter_handler.get_filtered_objects(filter_classes,
hosts, filter_properties, index)
def get_weighed_hosts(self, hosts, weight_properties):
"""Weigh the hosts."""
return self.weight_handler.get_weighed_objects(self.weight_classes,
hosts, weight_properties)
def update_service_capabilities(self, service_name, host, capabilities):
"""Update the per-service capabilities based on this notification."""
if service_name != 'compute':
LOG.debug(_('Ignoring %(service_name)s service update '
'from %(host)s'), {'service_name': service_name,
'host': host})
return
state_key = (host, capabilities.get('hypervisor_hostname'))
LOG.debug(_("Received %(service_name)s service update from "
"%(state_key)s."), {'service_name': service_name,
'state_key': state_key})
# Copy the capabilities, so we don't modify the original dict
capab_copy = dict(capabilities)
capab_copy["timestamp"] = timeutils.utcnow() # Reported time
self.service_states[state_key] = capab_copy
def get_all_host_states(self, context):
"""Returns a list of HostStates that represents all the hosts
the HostManager knows about. Also, each of the consumable resources
in HostState are pre-populated and adjusted based on data in the db.
"""
# Get resource usage across the available compute nodes:
compute_nodes = db.compute_node_get_all(context)
seen_nodes = set()
for compute in compute_nodes:
service = compute['service']
if not service:
LOG.warn(_("No service for compute ID %s") % compute['id'])
continue
host = service['host']
node = compute.get('hypervisor_hostname')
state_key = (host, node)
capabilities = self.service_states.get(state_key, None)
host_state = self.host_state_map.get(state_key)
if host_state:
host_state.update_capabilities(capabilities,
dict(service.iteritems()))
else:
host_state = self.host_state_cls(host, node,
capabilities=capabilities,
service=dict(service.iteritems()))
self.host_state_map[state_key] = host_state
host_state.update_from_compute_node(compute)
seen_nodes.add(state_key)
# remove compute nodes from host_state_map if they are not active
dead_nodes = set(self.host_state_map.keys()) - seen_nodes
for state_key in dead_nodes:
host, node = state_key
LOG.info(_("Removing dead compute node %(host)s:%(node)s "
"from scheduler") % {'host': host, 'node': node})
del self.host_state_map[state_key]
return self.host_state_map.itervalues()
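# Illustrative sketch (not used by HostManager): get_filtered_hosts() applies
# ignore_hosts and force_hosts to a dict keyed by (host, nodename) before any
# filter classes run. The toy function below reproduces that selection on
# plain tuples so the semantics are easy to follow; the host names are made up.
def _example_ignore_and_force(host_nodes, ignore_hosts=(), force_hosts=()):
    """Return the (host, node) pairs that survive ignore/force handling."""
    name_map = dict(((host, node), True) for host, node in host_nodes)
    for (host, node) in list(name_map.keys()):
        if host in ignore_hosts:
            del name_map[(host, node)]
    if force_hosts:
        for (host, node) in list(name_map.keys()):
            if host not in force_hosts:
                del name_map[(host, node)]
    return sorted(name_map.keys())
# Example: _example_ignore_and_force([('h1', 'n1'), ('h2', 'n2')],
#                                    ignore_hosts=['h2']) == [('h1', 'n1')]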
|
|
# Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.network import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest import test
CONF = config.CONF
class FWaaSExtensionTestJSON(base.BaseNetworkTest):
"""
Tests the following operations in the Neutron API using the REST client for
Neutron:
List firewall rules
Create firewall rule
Update firewall rule
Delete firewall rule
Show firewall rule
List firewall policies
Create firewall policy
Update firewall policy
Insert firewall rule to policy
Remove firewall rule from policy
Insert firewall rule after/before rule in policy
Update firewall policy audited attribute
Delete firewall policy
Show firewall policy
List firewall
Create firewall
Update firewall
Delete firewall
Show firewall
"""
@classmethod
def resource_setup(cls):
super(FWaaSExtensionTestJSON, cls).resource_setup()
if not test.is_extension_enabled('fwaas', 'network'):
msg = "FWaaS Extension not enabled."
raise cls.skipException(msg)
cls.fw_rule = cls.create_firewall_rule("allow", "tcp")
cls.fw_policy = cls.create_firewall_policy()
def _try_delete_policy(self, policy_id):
# delete policy, if it exists
try:
self.client.delete_firewall_policy(policy_id)
# if policy is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _try_delete_rule(self, rule_id):
# delete rule, if it exists
try:
self.client.delete_firewall_rule(rule_id)
# if rule is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
def _try_delete_firewall(self, fw_id):
# delete firewall, if it exists
try:
self.client.delete_firewall(fw_id)
# if firewall is not found, this means it was deleted in the test
except lib_exc.NotFound:
pass
self.client.wait_for_resource_deletion('firewall', fw_id)
def _wait_until_ready(self, fw_id):
target_states = ('ACTIVE', 'CREATED')
def _wait():
firewall = self.client.show_firewall(fw_id)
firewall = firewall['firewall']
return firewall['status'] in target_states
if not test.call_until_true(_wait, CONF.network.build_timeout,
CONF.network.build_interval):
m = ("Timed out waiting for firewall %s to reach %s state(s)" %
(fw_id, target_states))
raise exceptions.TimeoutException(m)
def test_list_firewall_rules(self):
# List firewall rules
fw_rules = self.client.list_firewall_rules()
fw_rules = fw_rules['firewall_rules']
self.assertIn((self.fw_rule['id'],
self.fw_rule['name'],
self.fw_rule['action'],
self.fw_rule['protocol'],
self.fw_rule['ip_version'],
self.fw_rule['enabled']),
[(m['id'],
m['name'],
m['action'],
m['protocol'],
m['ip_version'],
m['enabled']) for m in fw_rules])
def test_create_update_delete_firewall_rule(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
fw_rule_id = body['firewall_rule']['id']
# Update firewall rule
body = self.client.update_firewall_rule(fw_rule_id,
shared=True)
self.assertTrue(body["firewall_rule"]['shared'])
# Delete firewall rule
self.client.delete_firewall_rule(fw_rule_id)
# Confirm deletion
fw_rules = self.client.list_firewall_rules()
self.assertNotIn(fw_rule_id,
[m['id'] for m in fw_rules['firewall_rules']])
def test_show_firewall_rule(self):
# show a created firewall rule
fw_rule = self.client.show_firewall_rule(self.fw_rule['id'])
for key, value in fw_rule['firewall_rule'].iteritems():
self.assertEqual(self.fw_rule[key], value)
def test_list_firewall_policies(self):
fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
self.assertIn((self.fw_policy['id'],
self.fw_policy['name'],
self.fw_policy['firewall_rules']),
[(m['id'],
m['name'],
m['firewall_rules']) for m in fw_policies])
def test_create_update_delete_firewall_policy(self):
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
# Update firewall policy
body = self.client.update_firewall_policy(fw_policy_id,
shared=True,
name="updated_policy")
updated_fw_policy = body["firewall_policy"]
self.assertTrue(updated_fw_policy['shared'])
self.assertEqual("updated_policy", updated_fw_policy['name'])
# Delete firewall policy
self.client.delete_firewall_policy(fw_policy_id)
# Confirm deletion
fw_policies = self.client.list_firewall_policies()
fw_policies = fw_policies['firewall_policies']
self.assertNotIn(fw_policy_id, [m['id'] for m in fw_policies])
def test_show_firewall_policy(self):
# show a created firewall policy
fw_policy = self.client.show_firewall_policy(self.fw_policy['id'])
fw_policy = fw_policy['firewall_policy']
for key, value in fw_policy.iteritems():
self.assertEqual(self.fw_policy[key], value)
def test_create_show_delete_firewall(self):
# Create tenant network resources required for an ACTIVE firewall
network = self.create_network()
subnet = self.create_subnet(network)
router = self.create_router(
data_utils.rand_name('router-'),
admin_state_up=True)
self.client.add_router_interface_with_subnet_id(
router['id'], subnet['id'])
# Create firewall
body = self.client.create_firewall(
name=data_utils.rand_name("firewall"),
firewall_policy_id=self.fw_policy['id'])
created_firewall = body['firewall']
firewall_id = created_firewall['id']
self.addCleanup(self._try_delete_firewall, firewall_id)
# Wait for the firewall resource to become ready
self._wait_until_ready(firewall_id)
# show a created firewall
firewall = self.client.show_firewall(firewall_id)
firewall = firewall['firewall']
for key, value in firewall.iteritems():
if key == 'status':
continue
self.assertEqual(created_firewall[key], value)
# list firewall
firewalls = self.client.list_firewalls()
firewalls = firewalls['firewalls']
self.assertIn((created_firewall['id'],
created_firewall['name'],
created_firewall['firewall_policy_id']),
[(m['id'],
m['name'],
m['firewall_policy_id']) for m in firewalls])
# Delete firewall
self.client.delete_firewall(firewall_id)
@test.attr(type='smoke')
def test_firewall_rule_insertion_position_removal_rule_from_policy(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="tcp")
fw_rule_id1 = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id1)
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name("fw-policy"))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
# Insert rule to firewall policy
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id1, '', '')
# Verify insertion of rule in policy
self.assertIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
# Create another firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="icmp")
fw_rule_id2 = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id2)
# Insert rule to firewall policy after the first rule
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id2, fw_rule_id1, '')
        # Verify the position of the rule after insertion
fw_rule = self.client.show_firewall_rule(
fw_rule_id2)
self.assertEqual(int(fw_rule['firewall_rule']['position']), 2)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id2)
# Insert rule to firewall policy before the first rule
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id2, '', fw_rule_id1)
        # Verify the position of the rule after insertion
fw_rule = self.client.show_firewall_rule(
fw_rule_id2)
self.assertEqual(int(fw_rule['firewall_rule']['position']), 1)
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id2)
# Verify removal of rule from firewall policy
self.assertNotIn(fw_rule_id2, self._get_list_fw_rule_ids(fw_policy_id))
# Remove rule from the firewall policy
self.client.remove_firewall_rule_from_policy(
fw_policy_id, fw_rule_id1)
# Verify removal of rule from firewall policy
self.assertNotIn(fw_rule_id1, self._get_list_fw_rule_ids(fw_policy_id))
def _get_list_fw_rule_ids(self, fw_policy_id):
fw_policy = self.client.show_firewall_policy(
fw_policy_id)
return [ruleid for ruleid in fw_policy['firewall_policy']
['firewall_rules']]
def test_update_firewall_policy_audited_attribute(self):
# Create firewall rule
body = self.client.create_firewall_rule(
name=data_utils.rand_name("fw-rule"),
action="allow",
protocol="icmp")
fw_rule_id = body['firewall_rule']['id']
self.addCleanup(self._try_delete_rule, fw_rule_id)
# Create firewall policy
body = self.client.create_firewall_policy(
name=data_utils.rand_name('fw-policy'))
fw_policy_id = body['firewall_policy']['id']
self.addCleanup(self._try_delete_policy, fw_policy_id)
self.assertFalse(body['firewall_policy']['audited'])
        # Update firewall policy audited attribute to true
self.client.update_firewall_policy(fw_policy_id,
audited=True)
# Insert Firewall rule to firewall policy
self.client.insert_firewall_rule_in_policy(
fw_policy_id, fw_rule_id, '', '')
body = self.client.show_firewall_policy(
fw_policy_id)
self.assertFalse(body['firewall_policy']['audited'])
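# The _wait_until_ready() helper above relies on test.call_until_true() to
# poll the firewall status. The standalone sketch below shows the same
# poll-until-predicate pattern without any Tempest dependencies; the timeout
# and interval defaults are arbitrary and get_status() in the usage note is a
# placeholder.
def _example_poll_until(predicate, timeout=10, interval=1):
    """Return True if predicate() becomes truthy within timeout seconds."""
    import time
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return False
# Usage sketch:
#   _example_poll_until(lambda: get_status() in ('ACTIVE', 'CREATED'))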
|
|
#!/usr/bin/env python
from threading import Lock
import cv_bridge
import glob
import message_filters
import numpy as np
import os.path as osp
import rospy
import skimage.draw
import skimage.morphology
import tf
import yaml
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseArray
from jsk_recognition_msgs.msg import BoundingBox
from jsk_recognition_msgs.msg import BoundingBoxArray
from jsk_topic_tools import ConnectionBasedTransport
from sensor_msgs.msg import Image
from std_srvs.srv import SetBool
from std_srvs.srv import SetBoolResponse
import grasp_fusion_lib
from grasp_fusion_lib.contrib.grasp_fusion.utils import get_primitives_poses
class PrimitiveMatching(ConnectionBasedTransport):
def __init__(self):
super(PrimitiveMatching, self).__init__()
self.br = cv_bridge.CvBridge()
self.instance_bg_label = rospy.get_param('~instance_bg_label')
self.heightmap_frame = rospy.get_param('~heightmap_frame')
# Size[m] of each height map pixel
self.voxel_size = rospy.get_param('~voxel_size')
self.cluster_tolerance = rospy.get_param('~cluster_tolerance', 0.02)
self.cluster_max_size = rospy.get_param('~cluster_max_size')
self.cluster_min_size = rospy.get_param('~cluster_min_size')
self.prob_threshold = rospy.get_param('~prob_threshold', 0.5)
self.reliable_pts_ratio = rospy.get_param('~reliable_pts_ratio', 0.25)
# Directory of grasp primitives
self.primitive_dir = rospy.get_param('~primitive_dir')
self.primitives = []
if not osp.isdir(self.primitive_dir):
            err = 'Input primitive_dir is not a directory: %s' \
                % self.primitive_dir
rospy.logfatal(err)
rospy.signal_shutdown(err)
return
filenames = sorted(glob.glob(self.primitive_dir + "/*"))
for fname in filenames:
with open(fname) as f:
self.primitives.append(yaml.load(f))
# ROS publishers
self.pubs_poses = []
self.pubs_boxes = []
for prim in self.primitives:
self.pubs_poses.append(
self.advertise('~output/' + prim['label'] + '/poses',
PoseArray, queue_size=1))
self.pubs_boxes.append(
self.advertise('~output/' + prim['label'] + '/boxes',
BoundingBoxArray, queue_size=1))
self.pub_debug = self.advertise('~output/debug', Image, queue_size=1)
self.lock = Lock()
self.ignore_ins = False
self.srv_ignore_ins = rospy.Service(
'~ignore_instance', SetBool, self.ignore_ins_cb)
def subscribe(self):
self.sub_rgb = message_filters.Subscriber(
'~input/rgb', Image, queue_size=1, buff_size=2**24
)
self.sub_depth = message_filters.Subscriber(
'~input/depth', Image, queue_size=1, buff_size=2**24
)
self.sub_lbl_ins = message_filters.Subscriber(
'~input/label_instance', Image, queue_size=1, buff_size=2**24
)
self.sub_prob_pinch_aff = message_filters.Subscriber(
'~input/prob_pinch_affordance', Image,
queue_size=1, buff_size=2**24
)
self.sub_prob_pinch_sole_aff = message_filters.Subscriber(
'~input/prob_pinch_sole_affordance', Image,
queue_size=1, buff_size=2**24
)
self.sub_prob_suc_aff = message_filters.Subscriber(
'~input/prob_suction_affordance', Image,
queue_size=1, buff_size=2**24
)
sync = message_filters.TimeSynchronizer([
self.sub_rgb,
self.sub_depth,
self.sub_lbl_ins,
self.sub_prob_pinch_aff,
self.sub_prob_pinch_sole_aff,
self.sub_prob_suc_aff
], queue_size=100)
sync.registerCallback(self._cb)
def unsubscribe(self):
self.sub_depth.unregister()
self.sub_lbl_ins.unregister()
self.sub_prob_pinch_aff.unregister()
self.sub_prob_pinch_sole_aff.unregister()
self.sub_prob_suc_aff.unregister()
def _cb(
self,
imgmsg,
depthmsg,
lbl_ins_msg,
prob_pinch_aff_msg,
prob_pinch_sole_aff_msg,
prob_suc_aff_msg,
):
img = self.br.imgmsg_to_cv2(imgmsg, desired_encoding='rgb8')
depth = self.br.imgmsg_to_cv2(depthmsg, desired_encoding='32FC1')
lbl_ins = self.br.imgmsg_to_cv2(
lbl_ins_msg, desired_encoding='passthrough'
)
prob_pinch_aff = self.br.imgmsg_to_cv2(
prob_pinch_aff_msg, desired_encoding='passthrough'
)
prob_pinch_sole_aff = self.br.imgmsg_to_cv2(
prob_pinch_sole_aff_msg, desired_encoding='passthrough'
)
prob_suc_aff = self.br.imgmsg_to_cv2(
prob_suc_aff_msg, desired_encoding='passthrough'
)
with self.lock:
if self.ignore_ins:
lbl_ins = np.ones((lbl_ins.shape[0], lbl_ins.shape[1]),
dtype=lbl_ins.dtype)
prim_posess = get_primitives_poses(
self.primitives,
depth,
[prob_pinch_aff, prob_pinch_sole_aff, prob_suc_aff],
['pinch', 'pinch_sole', 'suction'],
self.cluster_tolerance,
self.cluster_max_size,
self.cluster_min_size,
voxel_size=self.voxel_size,
instance_label=lbl_ins,
instance_bg_label=self.instance_bg_label,
prob_threshold=self.prob_threshold,
reliable_pts_ratio=self.reliable_pts_ratio,
)
# Correction values for padding in get_heightmap
corr_x_m = 10 * self.voxel_size
corr_y_m = 12 * self.voxel_size
for i, poses in enumerate(prim_posess):
poses_msg = PoseArray()
poses_msg.header.stamp = depthmsg.header.stamp
poses_msg.header.frame_id = self.heightmap_frame
bboxes_msg = BoundingBoxArray()
bboxes_msg.header.stamp = depthmsg.header.stamp
bboxes_msg.header.frame_id = self.heightmap_frame
for pose in poses:
# Pose
pos_xy_pix = np.round(pose[1]).astype(int)
pos_xy_m = pose[1] * self.voxel_size
rad = np.radians(pose[2])
quat = tf.transformations.quaternion_about_axis(
rad, (0, 0, 1)
)
pos_z_m = depth[pos_xy_pix[1], pos_xy_pix[0]]
pose_msg = Pose()
pose_msg.position.x = pos_xy_m[0] - corr_x_m
pose_msg.position.y = pos_xy_m[1] - corr_y_m
pose_msg.position.z = pos_z_m
pose_msg.orientation.x = quat[0]
pose_msg.orientation.y = quat[1]
pose_msg.orientation.z = quat[2]
pose_msg.orientation.w = quat[3]
poses_msg.poses.append(pose_msg)
# Bounding box of instance
ins_mask = (lbl_ins == pose[0]) * (depth > 0)
# Denoise mask
skimage.morphology.remove_small_objects(
ins_mask, min_size=50, connectivity=1, in_place=True)
# array([[y, x], [y, x], ...])
ins_pts = np.array(np.where(ins_mask)).T
# array([[x, y], [x, y], ...])
pts_xy = ins_pts[:, [1, 0]]
rot = np.array([[np.cos(rad), -np.sin(rad)],
[np.sin(rad), np.cos(rad)]])
pts_aligned = np.dot(pts_xy, rot)
pts_center = np.mean(pts_xy, axis=0) * self.voxel_size
ins_depth = depth[ins_mask]
pts_center_z \
= (np.max(ins_depth) + np.min(ins_depth)) / 2
bbox_msg = BoundingBox()
bbox_msg.header.stamp = depthmsg.header.stamp
bbox_msg.header.frame_id = self.heightmap_frame
xs = pts_aligned[:, 0]
bbox_msg.dimensions.x \
= (np.max(xs) - np.min(xs)) * self.voxel_size
ys = pts_aligned[:, 1]
bbox_msg.dimensions.y \
= (np.max(ys) - np.min(ys)) * self.voxel_size
bbox_msg.dimensions.z \
= np.max(depth[ins_mask]) - np.min(depth[ins_mask])
bbox_msg.pose.position.x = pts_center[0] - corr_x_m
bbox_msg.pose.position.y = pts_center[1] - corr_y_m
bbox_msg.pose.position.z = pts_center_z
bbox_msg.pose.orientation = pose_msg.orientation
bboxes_msg.boxes.append(bbox_msg)
self.pubs_poses[i].publish(poses_msg)
self.pubs_boxes[i].publish(bboxes_msg)
# Publish image for debug
vizs = []
vizs.append(img)
vizs.append(grasp_fusion_lib.image.colorize_depth(
depth, min_value=0, max_value=0.3))
vizs.append(
grasp_fusion_lib.image.label2rgb(lbl_ins + 1, img, alpha=0.7)
)
viz = grasp_fusion_lib.image.colorize_heatmap(prob_suc_aff)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
for c in range(prob_pinch_aff.shape[2]):
prob_c = prob_pinch_aff[:, :, c]
viz = grasp_fusion_lib.image.colorize_heatmap(prob_c)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
for c in range(prob_pinch_sole_aff.shape[2]):
prob_c = prob_pinch_sole_aff[:, :, c]
viz = grasp_fusion_lib.image.colorize_heatmap(prob_c)
viz = grasp_fusion_lib.image.overlay_color_on_mono(
img_color=viz, img_mono=img, alpha=0.7
)
vizs.append(viz)
# vizs.extend([np.zeros_like(img)] * 2)
for poses in prim_posess:
vizs.append(self._primitive_poses2rgb(poses, img))
viz = grasp_fusion_lib.image.tile(
vizs, (-(-len(vizs) // 4), 4), boundary=True
)
debug_msg = self.br.cv2_to_imgmsg(viz, encoding='rgb8')
debug_msg.header.stamp = depthmsg.header.stamp
debug_msg.header.frame_id = self.heightmap_frame
self.pub_debug.publish(debug_msg)
def _primitive_poses2rgb(self, poses, img):
lbl = np.zeros(img.shape[:2], dtype=int)
for pose in poses:
rr, cc = skimage.draw.circle(
int(round(pose[1][1])), int(round(pose[1][0])), 5)
            # Clamp: skimage.draw.circle may return out-of-bounds indices.
rr = np.where(rr < 0, 0, rr)
rr = np.where(rr >= lbl.shape[0], lbl.shape[0] - 1, rr)
cc = np.where(cc < 0, 0, cc)
cc = np.where(cc >= lbl.shape[1], lbl.shape[1] - 1, cc)
lbl[rr, cc] = pose[0] + 1
return grasp_fusion_lib.image.label2rgb(lbl, img, alpha=0.7)
def ignore_ins_cb(self, req):
with self.lock:
self.ignore_ins = req.data
return SetBoolResponse(success=True)
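# The bounding-box code in _cb() rotates the instance pixels by the grasp
# angle (rot = [[cos, -sin], [sin, cos]], pts_aligned = pts_xy . rot) so box
# dimensions are measured along and across the grasp axis. The sketch below
# reproduces only that piece of math on an arbitrary point set; it is an
# illustration, not part of the node, and the voxel_size default is made up.
def _example_oriented_extent(pts_xy, angle_deg, voxel_size=0.002):
    """Return (dx, dy) extents [m] of pts_xy measured in the rotated frame."""
    rad = np.radians(angle_deg)
    rot = np.array([[np.cos(rad), -np.sin(rad)],
                    [np.sin(rad), np.cos(rad)]])
    pts_aligned = np.dot(np.asarray(pts_xy, dtype=float), rot)
    dx = (pts_aligned[:, 0].max() - pts_aligned[:, 0].min()) * voxel_size
    dy = (pts_aligned[:, 1].max() - pts_aligned[:, 1].min()) * voxel_size
    return dx, dy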
if __name__ == '__main__':
rospy.init_node('primitive_matching')
node = PrimitiveMatching()
rospy.spin()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utilities supporting export to SavedModel (deprecated).
This module and all its submodules are deprecated. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for migration instructions.
Some contents of this file are moved to tensorflow/python/estimator/export.py:
get_input_alternatives() -> obsolete
get_output_alternatives() -> obsolete, but see _get_default_export_output()
build_all_signature_defs() -> build_all_signature_defs()
get_timestamped_export_directory() -> get_timestamped_export_directory()
_get_* -> obsolete
_is_* -> obsolete
Functionality of build_standardized_signature_def() is moved to
tensorflow/python/estimator/export_output.py as ExportOutput.as_signature_def().
Anything to do with ExportStrategies or garbage collection is not moved.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import time
from tensorflow.contrib.layers.python.layers import feature_column
from tensorflow.contrib.learn.python.learn import export_strategy
from tensorflow.contrib.learn.python.learn.estimators import constants
from tensorflow.contrib.learn.python.learn.estimators import metric_key
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.contrib.learn.python.learn.utils import gc
from tensorflow.contrib.learn.python.learn.utils import input_fn_utils
from tensorflow.python.estimator import estimator as core_estimator
from tensorflow.python.estimator.export import export as core_export
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.summary import summary_iterator
from tensorflow.python.training import saver
from tensorflow.python.util import compat
from tensorflow.python.util.deprecation import deprecated
# A key for use in the input_alternatives dict indicating the default input.
# This is the input that will be expected when a serving request does not
# specify a specific signature.
# The default input alternative specifies placeholders that the input_fn
# requires to be fed (in the typical case, a single placeholder for a
# serialized tf.Example).
DEFAULT_INPUT_ALTERNATIVE_KEY = 'default_input_alternative'
# A key for use in the input_alternatives dict indicating the features input.
# The features inputs alternative specifies the feature Tensors provided as
# input to the model_fn, i.e. the outputs of the input_fn.
FEATURES_INPUT_ALTERNATIVE_KEY = 'features_input_alternative'
# A key for use in the output_alternatives dict indicating the default output.
# This is the output that will be provided when a serving request does not
# specify a specific signature.
# In a single-headed model, the single output is automatically the default.
# In a multi-headed model, the name of the desired default head should be
# provided to get_output_alternatives.
_FALLBACK_DEFAULT_OUTPUT_ALTERNATIVE_KEY = 'default_output_alternative'
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def build_standardized_signature_def(input_tensors, output_tensors,
problem_type):
"""Build a SignatureDef using problem type and input and output Tensors.
Note that this delegates the actual creation of the signatures to methods in
//third_party/tensorflow/python/saved_model/signature_def_utils.py, which may
assign names to the input and output tensors (depending on the problem type)
that are standardized in the context of SavedModel.
Args:
input_tensors: a dict of string key to `Tensor`
output_tensors: a dict of string key to `Tensor`
problem_type: an instance of constants.ProblemType, specifying
classification, regression, etc.
Returns:
A SignatureDef using SavedModel standard keys where possible.
Raises:
ValueError: if input_tensors or output_tensors is None or empty.
"""
if not input_tensors:
raise ValueError('input_tensors must be provided.')
if not output_tensors:
raise ValueError('output_tensors must be provided.')
# Per-method signature_def functions will standardize the keys if possible
if _is_classification_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
classes = _get_classification_classes(output_tensors)
scores = _get_classification_scores(output_tensors)
if classes is None and scores is None:
items = list(output_tensors.items())
if items[0][1].dtype == dtypes.string:
(_, classes), = items
else:
(_, scores), = items
return signature_def_utils.classification_signature_def(
examples, classes, scores)
elif _is_regression_problem(problem_type, input_tensors, output_tensors):
(_, examples), = input_tensors.items()
(_, predictions), = output_tensors.items()
return signature_def_utils.regression_signature_def(examples, predictions)
else:
return signature_def_utils.predict_signature_def(input_tensors,
output_tensors)
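# Quick sketch of how the branches above pick a signature for a classification
# head (assumes TF 1.x graph mode; the tensor names and shapes are made up):
def _example_classification_signature():
  """Build a classification SignatureDef from placeholder tensors."""
  import tensorflow as tf
  examples = tf.placeholder(tf.string, shape=[None], name='input_example')
  classes = tf.placeholder(tf.string, shape=[None], name='classes')
  scores = tf.placeholder(tf.float32, shape=[None], name='scores')
  return build_standardized_signature_def(
      {'examples': examples},
      {'classes': classes, 'scores': scores},
      constants.ProblemType.CLASSIFICATION)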
def _get_classification_scores(output_tensors):
scores = output_tensors.get(prediction_key.PredictionKey.SCORES)
if scores is None:
scores = output_tensors.get(prediction_key.PredictionKey.PROBABILITIES)
return scores
def _get_classification_classes(output_tensors):
classes = output_tensors.get(prediction_key.PredictionKey.CLASSES)
if classes is not None and classes.dtype != dtypes.string:
# Servo classification can only serve string classes.
return None
return classes
def _is_classification_problem(problem_type, input_tensors, output_tensors):
classes = _get_classification_classes(output_tensors)
scores = _get_classification_scores(output_tensors)
return ((problem_type == constants.ProblemType.CLASSIFICATION or
problem_type == constants.ProblemType.LOGISTIC_REGRESSION) and
len(input_tensors) == 1 and
(classes is not None or scores is not None or
len(output_tensors) == 1))
def _is_regression_problem(problem_type, input_tensors, output_tensors):
return (problem_type == constants.ProblemType.LINEAR_REGRESSION and
len(input_tensors) == 1 and len(output_tensors) == 1)
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def get_input_alternatives(input_ops):
"""Obtain all input alternatives using the input_fn output and heuristics."""
input_alternatives = {}
if isinstance(input_ops, input_fn_utils.InputFnOps):
features, unused_labels, default_inputs = input_ops
input_alternatives[DEFAULT_INPUT_ALTERNATIVE_KEY] = default_inputs
else:
features, unused_labels = input_ops
if not features:
raise ValueError('Features must be defined.')
# TODO(b/34253951): reinstate the "features" input_signature.
# The "features" input_signature, as written, does not work with
# SparseTensors. It is simply commented out as a stopgap, pending discussion
# on the bug as to the correct solution.
# Add the "features" input_signature in any case.
# Note defensive copy because model_fns alter the features dict.
# input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY] = (
# copy.copy(features))
return input_alternatives, features
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def get_output_alternatives(model_fn_ops, default_output_alternative_key=None):
"""Obtain all output alternatives using the model_fn output and heuristics.
Args:
model_fn_ops: a `ModelFnOps` object produced by a `model_fn`. This may or
may not have output_alternatives populated.
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Not needed for single-headed models.
Returns:
A tuple of (output_alternatives, actual_default_output_alternative_key),
where the latter names the head that will actually be served by default.
This may differ from the requested default_output_alternative_key when
a) no output_alternatives are provided at all, so one must be generated, or
b) there is exactly one head, which is used regardless of the requested
default.
Raises:
ValueError: if the requested default_output_alternative_key is not available
in output_alternatives, or if there are multiple output_alternatives and
no default is specified.
"""
output_alternatives = model_fn_ops.output_alternatives
if not output_alternatives:
if default_output_alternative_key:
raise ValueError('Requested default_output_alternative: {}, '
'but available output_alternatives are: []'.format(
default_output_alternative_key))
# Lacking provided output alternatives, the best we can do is to
# interpret the model as single-headed of unknown type.
default_problem_type = constants.ProblemType.UNSPECIFIED
default_outputs = model_fn_ops.predictions
if not isinstance(default_outputs, dict):
default_outputs = {prediction_key.PredictionKey.GENERIC: default_outputs}
actual_default_output_alternative_key = (
_FALLBACK_DEFAULT_OUTPUT_ALTERNATIVE_KEY)
output_alternatives = {
actual_default_output_alternative_key: (default_problem_type,
default_outputs)
}
return output_alternatives, actual_default_output_alternative_key
if default_output_alternative_key:
# If a default head is provided, use it.
if default_output_alternative_key in output_alternatives:
return output_alternatives, default_output_alternative_key
raise ValueError('Requested default_output_alternative: {}, '
'but available output_alternatives are: {}'.format(
default_output_alternative_key,
sorted(output_alternatives.keys())))
if len(output_alternatives) == 1:
# If there is only one head, use it as the default regardless of its name.
(actual_default_output_alternative_key, _), = output_alternatives.items()
return output_alternatives, actual_default_output_alternative_key
raise ValueError('Please specify a default_output_alternative. '
'Available output_alternatives are: {}'.format(
sorted(output_alternatives.keys())))
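# Sketch of the single-headed fallback path above, using a stand-in object for
# ModelFnOps (only the two attributes this function reads are needed); the
# prediction value is a dummy placeholder:
def _example_default_output_alternative():
  """Show the fallback output_alternatives built for a bare model_fn."""
  import collections
  fake_model_fn_ops = collections.namedtuple(
      'FakeModelFnOps', ['output_alternatives', 'predictions'])(
          output_alternatives=None, predictions={'scores': object()})
  alternatives, default_key = get_output_alternatives(fake_model_fn_ops)
  # default_key == 'default_output_alternative'; the single entry maps to
  # (constants.ProblemType.UNSPECIFIED, {'scores': ...}).
  return alternatives, default_key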
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def build_all_signature_defs(input_alternatives, output_alternatives,
actual_default_output_alternative_key):
"""Build `SignatureDef`s from all pairs of input and output alternatives."""
signature_def_map = {('%s:%s' % (input_key, output_key or 'None')):
build_standardized_signature_def(inputs, outputs,
problem_type)
for input_key, inputs in input_alternatives.items()
for output_key, (problem_type,
outputs) in output_alternatives.items()}
# Add the default SignatureDef
default_inputs = input_alternatives.get(DEFAULT_INPUT_ALTERNATIVE_KEY)
if not default_inputs:
raise ValueError('A default input_alternative must be provided.')
# default_inputs = input_alternatives[FEATURES_INPUT_ALTERNATIVE_KEY]
# default outputs are guaranteed to exist above
(default_problem_type, default_outputs) = (
output_alternatives[actual_default_output_alternative_key])
signature_def_map[signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY] = (
build_standardized_signature_def(default_inputs, default_outputs,
default_problem_type))
return signature_def_map
# When we create a timestamped directory, there is a small chance that the
# directory already exists because another worker is also writing exports.
# In this case we just wait one second to get a new timestamp and try again.
# If this fails several times in a row, then something is seriously wrong.
MAX_DIRECTORY_CREATION_ATTEMPTS = 10
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def get_timestamped_export_dir(export_dir_base):
"""Builds a path to a new subdirectory within the base directory.
Each export is written into a new subdirectory named using the
current time. This guarantees monotonically increasing version
numbers even across multiple runs of the pipeline.
The timestamp used is the number of seconds since epoch UTC.
Args:
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
Returns:
The full path of the new subdirectory (which is not actually created yet).
Raises:
RuntimeError: if repeated attempts fail to obtain a unique timestamped
directory name.
"""
attempts = 0
while attempts < MAX_DIRECTORY_CREATION_ATTEMPTS:
export_timestamp = int(time.time())
export_dir = os.path.join(
compat.as_bytes(export_dir_base),
compat.as_bytes(str(export_timestamp)))
if not gfile.Exists(export_dir):
# Collisions are still possible (though extremely unlikely): this
# directory is not actually created yet, but it will be almost
# instantly on return from this function.
return export_dir
time.sleep(1)
attempts += 1
logging.warn('Export directory {} already exists; retrying (attempt {}/{})'.
format(export_dir, attempts, MAX_DIRECTORY_CREATION_ATTEMPTS))
raise RuntimeError('Failed to obtain a unique export directory name after '
'{} attempts.'.format(MAX_DIRECTORY_CREATION_ATTEMPTS))
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def get_temp_export_dir(timestamped_export_dir):
"""Builds a directory name based on the argument but starting with 'temp-'.
This relies on the fact that TensorFlow Serving ignores subdirectories of
the base directory that can't be parsed as integers.
Args:
timestamped_export_dir: the name of the eventual export directory, e.g.
/foo/bar/<timestamp>
Returns:
A sister directory prefixed with 'temp-', e.g. /foo/bar/temp-<timestamp>.
"""
(dirname, basename) = os.path.split(timestamped_export_dir)
temp_export_dir = os.path.join(
compat.as_bytes(dirname), compat.as_bytes('temp-{}'.format(basename)))
return temp_export_dir
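# Small usage sketch for the two directory helpers above; the base path is
# arbitrary and nothing is created on disk by either call:
def _example_export_dir_names(export_dir_base='/tmp/my_model_exports'):
  """Return the timestamped export dir and its 'temp-' sibling."""
  timestamped = get_timestamped_export_dir(export_dir_base)
  temp = get_temp_export_dir(timestamped)
  # e.g. b'/tmp/my_model_exports/1512345678' and
  #      b'/tmp/my_model_exports/temp-1512345678'
  return timestamped, temp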
# create a simple parser that pulls the export_version from the directory.
def _export_version_parser(path):
filename = os.path.basename(path.path)
if not (len(filename) == 10 and filename.isdigit()):
return None
return path._replace(export_version=int(filename))
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def get_most_recent_export(export_dir_base):
"""Locate the most recent SavedModel export in a directory of many exports.
This method assumes that SavedModel subdirectories are named as a timestamp
(seconds from epoch), as produced by get_timestamped_export_dir().
Args:
export_dir_base: A base directory containing multiple timestamped
directories.
Returns:
    A gc.Path, which is just a namedtuple of (path, export_version).
"""
select_filter = gc.largest_export_versions(1)
results = select_filter(
gc.get_paths(export_dir_base, parser=_export_version_parser))
return next(iter(results or []), None)
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def garbage_collect_exports(export_dir_base, exports_to_keep):
"""Deletes older exports, retaining only a given number of the most recent.
Export subdirectories are assumed to be named with monotonically increasing
integers; the most recent are taken to be those with the largest values.
Args:
export_dir_base: the base directory under which each export is in a
versioned subdirectory.
exports_to_keep: the number of recent exports to retain.
"""
if exports_to_keep is None:
return
keep_filter = gc.largest_export_versions(exports_to_keep)
delete_filter = gc.negation(keep_filter)
for p in delete_filter(
gc.get_paths(export_dir_base, parser=_export_version_parser)):
try:
gfile.DeleteRecursively(p.path)
except errors_impl.NotFoundError as e:
logging.warn('Can not delete %s recursively: %s', p.path, e)
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def make_export_strategy(serving_input_fn,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=5,
strip_default_attrs=None):
"""Create an ExportStrategy for use with Experiment.
Args:
serving_input_fn: A function that takes no arguments and returns an
`InputFnOps`.
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to None to disable garbage
collection.
strip_default_attrs: Boolean. If True, default attrs in the
`GraphDef` will be stripped on write. This is recommended for better
forward compatibility of the resulting `SavedModel`.
Returns:
An ExportStrategy that can be passed to the Experiment constructor.
"""
def export_fn(estimator, export_dir_base, checkpoint_path=None,
strip_default_attrs=False):
"""Exports the given Estimator as a SavedModel.
Args:
estimator: the Estimator to export.
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
strip_default_attrs: Boolean. If `True`, default-valued attributes will
be removed from the NodeDefs.
Returns:
The string path to the exported directory.
Raises:
ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
and `default_output_alternative_key` was specified.
"""
if isinstance(estimator, core_estimator.Estimator):
if default_output_alternative_key is not None:
raise ValueError(
'default_output_alternative_key is not supported in core '
'Estimator. Given: {}'.format(default_output_alternative_key))
export_result = estimator.export_savedmodel(
export_dir_base,
serving_input_fn,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
else:
export_result = estimator.export_savedmodel(
export_dir_base,
serving_input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
as_text=as_text,
checkpoint_path=checkpoint_path,
strip_default_attrs=strip_default_attrs)
garbage_collect_exports(export_dir_base, exports_to_keep)
return export_result
return export_strategy.ExportStrategy('Servo', export_fn, strip_default_attrs)
@deprecated(None,
'Use tf.estimator.export.build_parsing_serving_input_receiver_fn')
def make_parsing_export_strategy(feature_columns,
default_output_alternative_key=None,
assets_extra=None,
as_text=False,
exports_to_keep=5,
target_core=False,
strip_default_attrs=None):
"""Create an ExportStrategy for use with Experiment, using `FeatureColumn`s.
Creates a SavedModel export that expects to be fed with a single string
Tensor containing serialized tf.Examples. At serving time, incoming
tf.Examples will be parsed according to the provided `FeatureColumn`s.
Args:
feature_columns: An iterable of `FeatureColumn`s representing the features
that must be provided at serving time (excluding labels!).
default_output_alternative_key: the name of the head to serve when an
incoming serving request does not explicitly request a specific head.
Must be `None` if the estimator inherits from @{tf.estimator.Estimator}
or for single-headed models.
assets_extra: A dict specifying how to populate the assets.extra directory
within the exported SavedModel. Each key should give the destination
path (including the filename) relative to the assets.extra directory.
The corresponding value gives the full path of the source file to be
copied. For example, the simple case of copying a single file without
renaming it is specified as
`{'my_asset_file.txt': '/path/to/my_asset_file.txt'}`.
as_text: whether to write the SavedModel proto in text format.
exports_to_keep: Number of exports to keep. Older exports will be
garbage-collected. Defaults to 5. Set to None to disable garbage
collection.
target_core: If True, prepare an ExportStrategy for use with
tensorflow.python.estimator.*. If False (default), prepare an
ExportStrategy for use with tensorflow.contrib.learn.python.learn.*.
strip_default_attrs: Boolean. If True, default attrs in the
`GraphDef` will be stripped on write. This is recommended for better
forward compatibility of the resulting `SavedModel`.
Returns:
An ExportStrategy that can be passed to the Experiment constructor.
"""
feature_spec = feature_column.create_feature_spec_for_parsing(feature_columns)
if target_core:
serving_input_fn = (
core_export.build_parsing_serving_input_receiver_fn(feature_spec))
else:
serving_input_fn = (
input_fn_utils.build_parsing_serving_input_fn(feature_spec))
return make_export_strategy(
serving_input_fn,
default_output_alternative_key=default_output_alternative_key,
assets_extra=assets_extra,
as_text=as_text,
exports_to_keep=exports_to_keep,
strip_default_attrs=strip_default_attrs)
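# Usage sketch for the parsing export strategy above; the feature columns are
# made up and the returned ExportStrategy would normally be handed to an
# Experiment:
def _example_parsing_export_strategy():
  """Build an ExportStrategy that parses serialized tf.Examples."""
  columns = [feature_column.real_valued_column('age'),
             feature_column.sparse_column_with_hash_bucket(
                 'country', hash_bucket_size=100)]
  return make_parsing_export_strategy(columns, exports_to_keep=3)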
def _default_compare_fn(curr_best_eval_result, cand_eval_result):
"""Compares two evaluation results and returns true if the 2nd one is better.
Both evaluation results should have the values for MetricKey.LOSS, which are
used for comparison.
Args:
curr_best_eval_result: current best eval metrics.
cand_eval_result: candidate eval metrics.
Returns:
True if cand_eval_result is better.
Raises:
ValueError: If input eval result is None or no loss is available.
"""
default_key = metric_key.MetricKey.LOSS
if not curr_best_eval_result or default_key not in curr_best_eval_result:
raise ValueError(
'curr_best_eval_result cannot be empty or no loss is found in it.')
if not cand_eval_result or default_key not in cand_eval_result:
raise ValueError(
'cand_eval_result cannot be empty or no loss is found in it.')
return curr_best_eval_result[default_key] > cand_eval_result[default_key]
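# A tiny concrete check of the comparison above (pure Python, lower loss wins):
def _example_compare_eval_results():
  """Return True because the candidate loss (0.3) beats the current (0.5)."""
  current_best = {metric_key.MetricKey.LOSS: 0.5}
  candidate = {metric_key.MetricKey.LOSS: 0.3}
  return _default_compare_fn(current_best, candidate)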
class BestModelSelector(object):
"""A helper that keeps track of export selection candidates.
THIS CLASS IS DEPRECATED. See
[contrib/learn/README.md](https://www.tensorflow.org/code/tensorflow/contrib/learn/README.md)
for general migration instructions.
"""
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def __init__(self, event_file_pattern=None, compare_fn=None):
"""Constructor of this class.
Args:
event_file_pattern: absolute event file name pattern.
compare_fn: a function that returns true if the candidate is better than
the current best model.
"""
self._compare_fn = compare_fn or _default_compare_fn
self._best_eval_result = self._get_best_eval_result(event_file_pattern)
def update(self, checkpoint_path, eval_result):
"""Records a given checkpoint and exports if this is the best model.
Args:
checkpoint_path: the checkpoint path to export.
eval_result: a dictionary which is usually generated in evaluation runs.
By default, eval_results contains 'loss' field.
Returns:
A string representing the path to the checkpoint to be exported.
A dictionary of the same type of eval_result.
Raises:
ValueError: if checkpoint path is empty.
      ValueError: if eval_result is None.
"""
if not checkpoint_path:
raise ValueError('Checkpoint path is empty.')
if eval_result is None:
      raise ValueError('%s has empty evaluation results.' % checkpoint_path)
if (self._best_eval_result is None or
self._compare_fn(self._best_eval_result, eval_result)):
self._best_eval_result = eval_result
return checkpoint_path, eval_result
else:
return '', None
def _get_best_eval_result(self, event_files):
"""Get the best eval result from event files.
Args:
event_files: Absolute pattern of event files.
Returns:
The best eval result.
"""
if not event_files:
return None
best_eval_result = None
for event_file in gfile.Glob(os.path.join(event_files)):
for event in summary_iterator.summary_iterator(event_file):
if event.HasField('summary'):
event_eval_result = {}
for value in event.summary.value:
if value.HasField('simple_value'):
event_eval_result[value.tag] = value.simple_value
if best_eval_result is None or self._compare_fn(
best_eval_result, event_eval_result):
best_eval_result = event_eval_result
return best_eval_result
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def make_best_model_export_strategy(
serving_input_fn,
exports_to_keep=1,
model_dir=None,
event_file_pattern=None,
compare_fn=None,
default_output_alternative_key=None,
strip_default_attrs=None):
"""Creates an custom ExportStrategy for use with tf.contrib.learn.Experiment.
Args:
serving_input_fn: a function that takes no arguments and returns an
`InputFnOps`.
exports_to_keep: an integer indicating how many historical best models need
to be preserved.
model_dir: Directory where model parameters, graph etc. are saved. This will
be used to load eval metrics from the directory when the export strategy
is created. So the best metrics would not be lost even if the export
strategy got preempted, which guarantees that only the best model would
be exported regardless of preemption. If None, however, the export
strategy would not be preemption-safe. To be preemption-safe, both
model_dir and event_file_pattern would be needed.
event_file_pattern: event file name pattern relative to model_dir, e.g.
"eval_continuous/*.tfevents.*". If None, however, the export strategy
would not be preemption-safe. To be preemption-safe, both
model_dir and event_file_pattern would be needed.
    compare_fn: a function that selects the 'best' candidate from a dictionary
      of evaluation results keyed by corresponding checkpoint path.
default_output_alternative_key: the key for default serving signature for
multi-headed inference graphs.
strip_default_attrs: Boolean. If True, default attrs in the
`GraphDef` will be stripped on write. This is recommended for better
forward compatibility of the resulting `SavedModel`.
Returns:
An ExportStrategy that can be passed to the Experiment constructor.
"""
best_model_export_strategy = make_export_strategy(
serving_input_fn,
exports_to_keep=exports_to_keep,
default_output_alternative_key=default_output_alternative_key,
strip_default_attrs=strip_default_attrs)
full_event_file_pattern = os.path.join(
model_dir,
event_file_pattern) if model_dir and event_file_pattern else None
best_model_selector = BestModelSelector(full_event_file_pattern, compare_fn)
def export_fn(estimator, export_dir_base, checkpoint_path, eval_result=None):
"""Exports the given Estimator as a SavedModel.
Args:
estimator: the Estimator to export.
export_dir_base: A string containing a directory to write the exported
graph and checkpoints.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
      eval_result: placeholder args matching the call signature of ExportStrategy.
Returns:
The string path to the exported directory.
"""
if not checkpoint_path:
# TODO(b/67425018): switch to
# checkpoint_path = estimator.latest_checkpoint()
# as soon as contrib is cleaned up and we can thus be sure that
# estimator is a tf.estimator.Estimator and not a
# tf.contrib.learn.Estimator
checkpoint_path = saver.latest_checkpoint(estimator.model_dir)
export_checkpoint_path, export_eval_result = best_model_selector.update(
checkpoint_path, eval_result)
if export_checkpoint_path and export_eval_result is not None:
checkpoint_base = os.path.basename(export_checkpoint_path)
export_dir = os.path.join(export_dir_base, checkpoint_base)
return best_model_export_strategy.export(
estimator, export_dir, export_checkpoint_path, export_eval_result)
else:
return ''
return export_strategy.ExportStrategy('best_model', export_fn)
# TODO(b/67013778): Revisit this approach when corresponding changes to
# TF Core are finalized.
@deprecated(None, 'Switch to tf.estimator.Exporter and associated utilities.')
def extend_export_strategy(base_export_strategy,
post_export_fn,
post_export_name=None):
"""Extend ExportStrategy, calling post_export_fn after export.
Args:
base_export_strategy: An ExportStrategy that can be passed to the Experiment
constructor.
post_export_fn: A user-specified function to call after exporting the
SavedModel. Takes two arguments - the path to the SavedModel exported by
base_export_strategy and the directory where to export the SavedModel
modified by the post_export_fn. Returns the path to the exported
SavedModel.
post_export_name: The directory name under the export base directory where
SavedModels generated by the post_export_fn will be written. If None, the
directory name of base_export_strategy is used.
Returns:
An ExportStrategy that can be passed to the Experiment constructor.
"""
def export_fn(estimator, export_dir_base, checkpoint_path=None):
"""Exports the given Estimator as a SavedModel and invokes post_export_fn.
Args:
estimator: the Estimator to export.
export_dir_base: A string containing a directory to write the exported
graphs and checkpoint.
checkpoint_path: The checkpoint path to export. If None (the default),
the most recent checkpoint found within the model directory is chosen.
Returns:
The string path to the SavedModel indicated by post_export_fn.
Raises:
ValueError: If `estimator` is a @{tf.estimator.Estimator} instance
and `default_output_alternative_key` was specified or if post_export_fn
does not return a valid directory.
RuntimeError: If unable to create temporary or final export directory.
"""
tmp_base_export_folder = 'temp-base-export-' + str(int(time.time()))
tmp_base_export_dir = os.path.join(export_dir_base, tmp_base_export_folder)
if gfile.Exists(tmp_base_export_dir):
raise RuntimeError('Failed to obtain base export directory')
gfile.MakeDirs(tmp_base_export_dir)
tmp_base_export = base_export_strategy.export(
estimator, tmp_base_export_dir, checkpoint_path)
tmp_post_export_folder = 'temp-post-export-' + str(int(time.time()))
tmp_post_export_dir = os.path.join(export_dir_base, tmp_post_export_folder)
if gfile.Exists(tmp_post_export_dir):
raise RuntimeError('Failed to obtain temp export directory')
gfile.MakeDirs(tmp_post_export_dir)
tmp_post_export = post_export_fn(tmp_base_export, tmp_post_export_dir)
if not tmp_post_export.startswith(tmp_post_export_dir):
raise ValueError('post_export_fn must return a sub-directory of {}'
.format(tmp_post_export_dir))
post_export_relpath = os.path.relpath(tmp_post_export, tmp_post_export_dir)
post_export = os.path.join(export_dir_base, post_export_relpath)
if gfile.Exists(post_export):
raise RuntimeError('Failed to obtain final export directory')
gfile.Rename(tmp_post_export, post_export)
gfile.DeleteRecursively(tmp_base_export_dir)
gfile.DeleteRecursively(tmp_post_export_dir)
return post_export
name = post_export_name if post_export_name else base_export_strategy.name
return export_strategy.ExportStrategy(name, export_fn)
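# A hedged usage sketch for extend_export_strategy; the callback and names below
# are hypothetical and only illustrate the documented contract: post_export_fn
# receives the path of the freshly exported SavedModel and a destination
# directory, and must return a sub-directory of that destination.
#
#   def my_post_export_fn(saved_model_path, destination_dir):
#     final_dir = os.path.join(destination_dir, os.path.basename(saved_model_path))
#     gfile.MakeDirs(final_dir)
#     # ... copy or rewrite the SavedModel into final_dir here ...
#     return final_dir
#
#   extended_strategy = extend_export_strategy(
#       base_export_strategy, my_post_export_fn, post_export_name='rewritten_model')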
|
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import contextlib
import functools
import json
import logging
import re
import sys
from inspect import getdoc
from operator import attrgetter
from . import errors
from . import signals
from .. import __version__
from ..config import config
from ..config import ConfigurationError
from ..config import parse_environment
from ..config.environment import Environment
from ..config.serialize import serialize_config
from ..const import DEFAULT_TIMEOUT
from ..const import IS_WINDOWS_PLATFORM
from ..progress_stream import StreamOutputError
from ..project import NoSuchService
from ..project import OneOffFilter
from ..service import BuildAction
from ..service import BuildError
from ..service import ConvergenceStrategy
from ..service import ImageType
from ..service import NeedsBuildError
from .command import get_config_path_from_options
from .command import project_from_options
from .docopt_command import DocoptDispatcher
from .docopt_command import get_handler
from .docopt_command import NoSuchCommand
from .errors import UserError
from .formatter import ConsoleWarningFormatter
from .formatter import Formatter
from .log_printer import build_log_presenters
from .log_printer import LogPrinter
from .utils import get_version_info
from .utils import yesno
if not IS_WINDOWS_PLATFORM:
from dockerpty.pty import PseudoTerminal, RunOperation, ExecOperation
log = logging.getLogger(__name__)
console_handler = logging.StreamHandler(sys.stderr)
def main():
command = dispatch()
try:
command()
except (KeyboardInterrupt, signals.ShutdownException):
log.error("Aborting.")
sys.exit(1)
except (UserError, NoSuchService, ConfigurationError) as e:
log.error(e.msg)
sys.exit(1)
except BuildError as e:
log.error("Service '%s' failed to build: %s" % (e.service.name, e.reason))
sys.exit(1)
except StreamOutputError as e:
log.error(e)
sys.exit(1)
except NeedsBuildError as e:
log.error("Service '%s' needs to be built, but --no-build was passed." % e.service.name)
sys.exit(1)
except errors.ConnectionError:
sys.exit(1)
def dispatch():
setup_logging()
dispatcher = DocoptDispatcher(
TopLevelCommand,
{'options_first': True, 'version': get_version_info('compose')})
try:
options, handler, command_options = dispatcher.parse(sys.argv[1:])
except NoSuchCommand as e:
commands = "\n".join(parse_doc_section("commands:", getdoc(e.supercommand)))
log.error("No such command: %s\n\n%s", e.command, commands)
sys.exit(1)
setup_console_handler(console_handler, options.get('--verbose'))
return functools.partial(perform_command, options, handler, command_options)
def perform_command(options, handler, command_options):
if options['COMMAND'] in ('help', 'version'):
# Skip looking up the compose file.
handler(command_options)
return
if options['COMMAND'] == 'config':
command = TopLevelCommand(None)
handler(command, options, command_options)
return
project = project_from_options('.', options)
command = TopLevelCommand(project)
with errors.handle_connection_errors(project.client):
handler(command, command_options)
def setup_logging():
root_logger = logging.getLogger()
root_logger.addHandler(console_handler)
root_logger.setLevel(logging.DEBUG)
# Disable requests logging
logging.getLogger("requests").propagate = False
def setup_console_handler(handler, verbose):
if handler.stream.isatty():
format_class = ConsoleWarningFormatter
else:
format_class = logging.Formatter
if verbose:
handler.setFormatter(format_class('%(name)s.%(funcName)s: %(message)s'))
handler.setLevel(logging.DEBUG)
else:
handler.setFormatter(format_class())
handler.setLevel(logging.INFO)
# stolen from docopt master
def parse_doc_section(name, source):
pattern = re.compile('^([^\n]*' + name + '[^\n]*\n?(?:[ \t].*?(?:\n|$))*)',
re.IGNORECASE | re.MULTILINE)
return [s.strip() for s in pattern.findall(source)]
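# As a hypothetical illustration of parse_doc_section: for a docopt-style
# docstring containing a "Commands:" header followed by indented entries
# (e.g. "  build    Build or rebuild services"), parse_doc_section("commands:", doc)
# returns a one-element list holding that header line together with its indented
# entries; dispatch() above relies on this to print the command list when an
# unknown command is given.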
class TopLevelCommand(object):
"""Define and run multi-container applications with Docker.
Usage:
docker-compose [-f=<arg>...] [options] [COMMAND] [ARGS...]
docker-compose -h|--help
Options:
-f, --file FILE Specify an alternate compose file (default: docker-compose.yml)
-p, --project-name NAME Specify an alternate project name (default: directory name)
--verbose Show more output
-v, --version Print version and exit
-H, --host HOST Daemon socket to connect to
--tls Use TLS; implied by --tlsverify
--tlscacert CA_PATH Trust certs signed only by this CA
--tlscert CLIENT_CERT_PATH Path to TLS certificate file
--tlskey TLS_KEY_PATH Path to TLS key file
--tlsverify Use TLS and verify the remote
--skip-hostname-check Don't check the daemon's hostname against the name specified
in the client certificate (for example if your docker host
is an IP address)
Commands:
build Build or rebuild services
config Validate and view the compose file
create Create services
down Stop and remove containers, networks, images, and volumes
events Receive real time events from containers
exec Execute a command in a running container
help Get help on a command
kill Kill containers
logs View output from containers
pause Pause services
port Print the public port for a port binding
ps List containers
pull Pulls service images
restart Restart services
rm Remove stopped containers
run Run a one-off command
scale Set number of containers for a service
start Start services
stop Stop services
unpause Unpause services
up Create and start containers
version Show the Docker-Compose version information
"""
def __init__(self, project, project_dir='.'):
self.project = project
        self.project_dir = project_dir
def build(self, options):
"""
Build or rebuild services.
Services are built once and then tagged as `project_service`,
e.g. `composetest_db`. If you change a service's `Dockerfile` or the
contents of its build directory, you can run `docker-compose build` to rebuild it.
Usage: build [options] [SERVICE...]
Options:
--force-rm Always remove intermediate containers.
--no-cache Do not use cache when building the image.
--pull Always attempt to pull a newer version of the image.
"""
self.project.build(
service_names=options['SERVICE'],
no_cache=bool(options.get('--no-cache', False)),
pull=bool(options.get('--pull', False)),
force_rm=bool(options.get('--force-rm', False)))
def config(self, config_options, options):
"""
Validate and view the compose file.
Usage: config [options]
Options:
-q, --quiet Only validate the configuration, don't print
anything.
--services Print the service names, one per line.
"""
environment = Environment.from_env_file(self.project_dir)
config_path = get_config_path_from_options(
self.project_dir, config_options, environment
)
compose_config = config.load(
config.find(self.project_dir, config_path, environment)
)
if options['--quiet']:
return
if options['--services']:
print('\n'.join(service['name'] for service in compose_config.services))
return
print(serialize_config(compose_config))
def create(self, options):
"""
Creates containers for a service.
Usage: create [options] [SERVICE...]
Options:
--force-recreate Recreate containers even if their configuration and
image haven't changed. Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing.
--build Build images before creating containers.
"""
service_names = options['SERVICE']
self.project.create(
service_names=service_names,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
)
def down(self, options):
"""
Stop containers and remove containers, networks, volumes, and images
created by `up`. Only containers and networks are removed by default.
Usage: down [options]
Options:
--rmi type Remove images, type may be one of: 'all' to remove
all images, or 'local' to remove only images that
                            don't have a custom name set by the `image` field
-v, --volumes Remove data volumes
--remove-orphans Remove containers for services not defined in
the Compose file
"""
image_type = image_type_from_opt('--rmi', options['--rmi'])
self.project.down(image_type, options['--volumes'], options['--remove-orphans'])
def events(self, options):
"""
Receive real time events from containers.
Usage: events [options] [SERVICE...]
Options:
--json Output events as a stream of json objects
"""
def format_event(event):
attributes = ["%s=%s" % item for item in event['attributes'].items()]
return ("{time} {type} {action} {id} ({attrs})").format(
attrs=", ".join(sorted(attributes)),
**event)
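        # A hypothetical container-start event, for example, would be rendered as:
        #   2016-04-12 17:09:52.147149 container start 1a2b3c (image=busybox, name=proj_web_1)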
def json_format_event(event):
event['time'] = event['time'].isoformat()
event.pop('container')
return json.dumps(event)
for event in self.project.events():
formatter = json_format_event if options['--json'] else format_event
print(formatter(event))
sys.stdout.flush()
def exec_command(self, options):
"""
Execute a command in a running container
Usage: exec [options] SERVICE COMMAND [ARGS...]
Options:
-d Detached mode: Run command in the background.
--privileged Give extended privileges to the process.
--user USER Run the command as this user.
-T Disable pseudo-tty allocation. By default `docker-compose exec`
allocates a TTY.
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
command = [options['COMMAND']] + options['ARGS']
tty = not options["-T"]
create_exec_options = {
"privileged": options["--privileged"],
"user": options["--user"],
"tty": tty,
"stdin": tty,
}
exec_id = container.create_exec(command, **create_exec_options)
if options['-d']:
container.start_exec(exec_id, tty=tty)
return
signals.set_signal_handler_to_shutdown()
try:
operation = ExecOperation(
self.project.client,
exec_id,
interactive=tty,
)
pty = PseudoTerminal(self.project.client, operation)
pty.start()
except signals.ShutdownException:
log.info("received shutdown exception: closing")
exit_code = self.project.client.exec_inspect(exec_id).get("ExitCode")
sys.exit(exit_code)
@classmethod
def help(cls, options):
"""
Get help on a command.
Usage: help COMMAND
"""
handler = get_handler(cls, options['COMMAND'])
raise SystemExit(getdoc(handler))
def kill(self, options):
"""
Force stop service containers.
Usage: kill [options] [SERVICE...]
Options:
-s SIGNAL SIGNAL to send to the container.
Default signal is SIGKILL.
"""
signal = options.get('-s', 'SIGKILL')
self.project.kill(service_names=options['SERVICE'], signal=signal)
def logs(self, options):
"""
View output from containers.
Usage: logs [options] [SERVICE...]
Options:
--no-color Produce monochrome output.
-f, --follow Follow log output.
-t, --timestamps Show timestamps.
--tail="all" Number of lines to show from the end of the logs
for each container.
"""
containers = self.project.containers(service_names=options['SERVICE'], stopped=True)
tail = options['--tail']
if tail is not None:
if tail.isdigit():
tail = int(tail)
elif tail != 'all':
raise UserError("tail flag must be all or a number")
log_args = {
'follow': options['--follow'],
'tail': tail,
'timestamps': options['--timestamps']
}
print("Attaching to", list_containers(containers))
log_printer_from_project(
self.project,
containers,
options['--no-color'],
log_args).run()
def pause(self, options):
"""
Pause services.
Usage: pause [SERVICE...]
"""
containers = self.project.pause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to pause', 1)
def port(self, options):
"""
Print the public port for a port binding.
Usage: port [options] SERVICE PRIVATE_PORT
Options:
--protocol=proto tcp or udp [default: tcp]
--index=index index of the container if there are multiple
instances of a service [default: 1]
"""
index = int(options.get('--index'))
service = self.project.get_service(options['SERVICE'])
try:
container = service.get_container(number=index)
except ValueError as e:
raise UserError(str(e))
print(container.get_local_port(
options['PRIVATE_PORT'],
protocol=options.get('--protocol') or 'tcp') or '')
def ps(self, options):
"""
List containers.
Usage: ps [options] [SERVICE...]
Options:
-q Only display IDs
"""
containers = sorted(
self.project.containers(service_names=options['SERVICE'], stopped=True) +
self.project.containers(service_names=options['SERVICE'], one_off=OneOffFilter.only),
key=attrgetter('name'))
if options['-q']:
for container in containers:
print(container.id)
else:
headers = [
'Name',
'Command',
'State',
'Ports',
]
rows = []
for container in containers:
command = container.human_readable_command
if len(command) > 30:
command = '%s ...' % command[:26]
rows.append([
container.name,
command,
container.human_readable_state,
container.human_readable_ports,
])
print(Formatter().table(headers, rows))
def pull(self, options):
"""
Pulls images for services.
Usage: pull [options] [SERVICE...]
Options:
            --ignore-pull-failures  Pull what it can and ignore images with pull failures.
"""
self.project.pull(
service_names=options['SERVICE'],
ignore_pull_failures=options.get('--ignore-pull-failures')
)
def rm(self, options):
"""
Remove stopped service containers.
By default, volumes attached to containers will not be removed. You can see all
volumes with `docker volume ls`.
Any data which is not in a volume will be lost.
Usage: rm [options] [SERVICE...]
Options:
-f, --force Don't ask to confirm removal
-v Remove volumes associated with containers
-a, --all Also remove one-off containers created by
docker-compose run
"""
if options.get('--all'):
one_off = OneOffFilter.include
else:
log.warn(
'Not including one-off containers created by `docker-compose run`.\n'
'To include them, use `docker-compose rm --all`.\n'
'This will be the default behavior in the next version of Compose.\n')
one_off = OneOffFilter.exclude
all_containers = self.project.containers(
service_names=options['SERVICE'], stopped=True, one_off=one_off
)
stopped_containers = [c for c in all_containers if not c.is_running]
if len(stopped_containers) > 0:
print("Going to remove", list_containers(stopped_containers))
if options.get('--force') \
or yesno("Are you sure? [yN] ", default=False):
self.project.remove_stopped(
service_names=options['SERVICE'],
v=options.get('-v', False),
one_off=one_off
)
else:
print("No stopped containers")
def run(self, options):
"""
Run a one-off command on a service.
For example:
$ docker-compose run web python manage.py shell
By default, linked services will be started, unless they are already
running. If you do not want to start linked services, use
`docker-compose run --no-deps SERVICE COMMAND [ARGS...]`.
Usage: run [options] [-p PORT...] [-e KEY=VAL...] SERVICE [COMMAND] [ARGS...]
Options:
-d Detached mode: Run container in the background, print
new container name.
--name NAME Assign a name to the container
--entrypoint CMD Override the entrypoint of the image.
-e KEY=VAL Set an environment variable (can be used multiple times)
-u, --user="" Run as specified username or uid
--no-deps Don't start linked services.
--rm Remove container after run. Ignored in detached mode.
-p, --publish=[] Publish a container's port(s) to the host
--service-ports Run command with the service's ports enabled and mapped
to the host.
-T Disable pseudo-tty allocation. By default `docker-compose run`
allocates a TTY.
-w, --workdir="" Working directory inside the container
"""
service = self.project.get_service(options['SERVICE'])
detach = options['-d']
if IS_WINDOWS_PLATFORM and not detach:
raise UserError(
"Interactive mode is not yet supported on Windows.\n"
"Please pass the -d flag when using `docker-compose run`."
)
if options['--publish'] and options['--service-ports']:
raise UserError(
'Service port mapping and manual port mapping '
                'cannot be used together'
)
if options['COMMAND']:
command = [options['COMMAND']] + options['ARGS']
else:
command = service.options.get('command')
container_options = build_container_options(options, detach, command)
run_one_off_container(container_options, self.project, service, options)
def scale(self, options):
"""
Set number of containers to run for a service.
Numbers are specified in the form `service=num` as arguments.
For example:
$ docker-compose scale web=2 worker=3
Usage: scale [options] [SERVICE=NUM...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
for s in options['SERVICE=NUM']:
if '=' not in s:
raise UserError('Arguments to scale should be in the form service=num')
service_name, num = s.split('=', 1)
try:
num = int(num)
except ValueError:
raise UserError('Number of containers for service "%s" is not a '
'number' % service_name)
self.project.get_service(service_name).scale(num, timeout=timeout)
def start(self, options):
"""
Start existing containers.
Usage: start [SERVICE...]
"""
containers = self.project.start(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to start', 1)
def stop(self, options):
"""
Stop running containers without removing them.
They can be started again with `docker-compose start`.
Usage: stop [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
self.project.stop(service_names=options['SERVICE'], timeout=timeout)
def restart(self, options):
"""
Restart running containers.
Usage: restart [options] [SERVICE...]
Options:
-t, --timeout TIMEOUT Specify a shutdown timeout in seconds.
(default: 10)
"""
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
containers = self.project.restart(service_names=options['SERVICE'], timeout=timeout)
exit_if(not containers, 'No containers to restart', 1)
def unpause(self, options):
"""
Unpause services.
Usage: unpause [SERVICE...]
"""
containers = self.project.unpause(service_names=options['SERVICE'])
exit_if(not containers, 'No containers to unpause', 1)
def up(self, options):
"""
Builds, (re)creates, starts, and attaches to containers for a service.
Unless they are already running, this command also starts any linked services.
The `docker-compose up` command aggregates the output of each container. When
the command exits, all containers are stopped. Running `docker-compose up -d`
starts the containers in the background and leaves them running.
If there are existing containers for a service, and the service's configuration
or image was changed after the container's creation, `docker-compose up` picks
up the changes by stopping and recreating the containers (preserving mounted
volumes). To prevent Compose from picking up changes, use the `--no-recreate`
flag.
If you want to force Compose to stop and recreate all containers, use the
`--force-recreate` flag.
Usage: up [options] [SERVICE...]
Options:
-d Detached mode: Run containers in the background,
print new container names.
Incompatible with --abort-on-container-exit.
--no-color Produce monochrome output.
--no-deps Don't start linked services.
--force-recreate Recreate containers even if their configuration
and image haven't changed.
Incompatible with --no-recreate.
--no-recreate If containers already exist, don't recreate them.
Incompatible with --force-recreate.
--no-build Don't build an image, even if it's missing.
--build Build images before starting containers.
--abort-on-container-exit Stops all containers if any container was stopped.
Incompatible with -d.
-t, --timeout TIMEOUT Use this timeout in seconds for container shutdown
when attached or when containers are already
running. (default: 10)
--remove-orphans Remove containers for services not
defined in the Compose file
"""
start_deps = not options['--no-deps']
cascade_stop = options['--abort-on-container-exit']
service_names = options['SERVICE']
timeout = int(options.get('--timeout') or DEFAULT_TIMEOUT)
remove_orphans = options['--remove-orphans']
detached = options.get('-d')
if detached and cascade_stop:
raise UserError("--abort-on-container-exit and -d cannot be combined.")
with up_shutdown_context(self.project, service_names, timeout, detached):
to_attach = self.project.up(
service_names=service_names,
start_deps=start_deps,
strategy=convergence_strategy_from_opts(options),
do_build=build_action_from_opts(options),
timeout=timeout,
detached=detached,
remove_orphans=remove_orphans)
if detached:
return
log_printer = log_printer_from_project(
self.project,
filter_containers_to_service_names(to_attach, service_names),
options['--no-color'],
{'follow': True},
cascade_stop,
event_stream=self.project.events(service_names=service_names))
print("Attaching to", list_containers(log_printer.containers))
log_printer.run()
if cascade_stop:
print("Aborting on container exit...")
self.project.stop(service_names=service_names, timeout=timeout)
@classmethod
def version(cls, options):
"""
        Show version information.
Usage: version [--short]
Options:
--short Shows only Compose's version number.
"""
if options['--short']:
print(__version__)
else:
print(get_version_info('full'))
def convergence_strategy_from_opts(options):
no_recreate = options['--no-recreate']
force_recreate = options['--force-recreate']
if force_recreate and no_recreate:
raise UserError("--force-recreate and --no-recreate cannot be combined.")
if force_recreate:
return ConvergenceStrategy.always
if no_recreate:
return ConvergenceStrategy.never
return ConvergenceStrategy.changed
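# The flag handling above amounts to the following mapping:
#   --force-recreate           -> ConvergenceStrategy.always
#   --no-recreate              -> ConvergenceStrategy.never
#   neither flag               -> ConvergenceStrategy.changed
#   both flags                 -> UserError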
def image_type_from_opt(flag, value):
if not value:
return ImageType.none
try:
return ImageType[value]
except KeyError:
raise UserError("%s flag must be one of: all, local" % flag)
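# For example, image_type_from_opt('--rmi', 'local') returns ImageType.local,
# an empty value returns ImageType.none, and any other value raises UserError.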
def build_action_from_opts(options):
if options['--build'] and options['--no-build']:
raise UserError("--build and --no-build can not be combined.")
if options['--build']:
return BuildAction.force
if options['--no-build']:
return BuildAction.skip
return BuildAction.none
def build_container_options(options, detach, command):
container_options = {
'command': command,
'tty': not (detach or options['-T'] or not sys.stdin.isatty()),
'stdin_open': not detach,
'detach': detach,
}
if options['-e']:
container_options['environment'] = parse_environment(options['-e'])
if options['--entrypoint']:
container_options['entrypoint'] = options.get('--entrypoint')
if options['--rm']:
container_options['restart'] = None
if options['--user']:
container_options['user'] = options.get('--user')
if not options['--service-ports']:
container_options['ports'] = []
if options['--publish']:
container_options['ports'] = options.get('--publish')
if options['--name']:
container_options['name'] = options['--name']
if options['--workdir']:
container_options['working_dir'] = options['--workdir']
return container_options
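# An illustrative sketch with hypothetical values: for a command line such as
#   docker-compose run --rm -e DEBUG=1 --user app web bash
# docopt yields options roughly of the form
#   {'-e': ['DEBUG=1'], '--user': 'app', '--rm': True, '-T': False, ...}
# and build_container_options() maps them to container options along the lines of
#   {'command': ['bash'], 'environment': {'DEBUG': '1'}, 'user': 'app',
#    'stdin_open': True, 'detach': False, 'ports': [], 'restart': None}
# plus a 'tty' flag that also depends on whether stdin is attached to a terminal.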
def run_one_off_container(container_options, project, service, options):
if not options['--no-deps']:
deps = service.get_dependency_names()
if deps:
project.up(
service_names=deps,
start_deps=True,
strategy=ConvergenceStrategy.never)
project.initialize()
container = service.create_container(
quiet=True,
one_off=True,
**container_options)
if options['-d']:
service.start_container(container)
print(container.name)
return
def remove_container(force=False):
if options['--rm']:
                project.client.remove_container(container.id, force=force)
signals.set_signal_handler_to_shutdown()
try:
try:
operation = RunOperation(
project.client,
container.id,
interactive=not options['-T'],
logs=False,
)
pty = PseudoTerminal(project.client, operation)
sockets = pty.sockets()
service.start_container(container)
pty.start(sockets)
exit_code = container.wait()
except signals.ShutdownException:
project.client.stop(container.id)
exit_code = 1
except signals.ShutdownException:
project.client.kill(container.id)
remove_container(force=True)
sys.exit(2)
remove_container()
sys.exit(exit_code)
def log_printer_from_project(
project,
containers,
monochrome,
log_args,
cascade_stop=False,
event_stream=None,
):
return LogPrinter(
containers,
build_log_presenters(project.service_names, monochrome),
event_stream or project.events(),
cascade_stop=cascade_stop,
log_args=log_args)
def filter_containers_to_service_names(containers, service_names):
if not service_names:
return containers
return [
container
for container in containers if container.service in service_names
]
@contextlib.contextmanager
def up_shutdown_context(project, service_names, timeout, detached):
if detached:
yield
return
signals.set_signal_handler_to_shutdown()
try:
try:
yield
except signals.ShutdownException:
print("Gracefully stopping... (press Ctrl+C again to force)")
project.stop(service_names=service_names, timeout=timeout)
except signals.ShutdownException:
project.kill(service_names=service_names)
sys.exit(2)
def list_containers(containers):
return ", ".join(c.name for c in containers)
def exit_if(condition, message, exit_code):
if condition:
log.error(message)
raise SystemExit(exit_code)
|
|
"""Test the cross_validation module"""
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy import stats
from sklearn.exceptions import ConvergenceWarning
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.mocking import CheckingClassifier, MockDataFrame
with warnings.catch_warnings():
warnings.simplefilter('ignore')
from sklearn import cross_validation as cval
from sklearn.datasets import make_regression
from sklearn.datasets import load_boston
from sklearn.datasets import load_digits
from sklearn.datasets import load_iris
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import explained_variance_score
from sklearn.metrics import make_scorer
from sklearn.metrics import precision_score
from sklearn.externals import six
from sklearn.externals.six.moves import zip
from sklearn.linear_model import Ridge
from sklearn.multiclass import OneVsRestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.cluster import KMeans
from sklearn.preprocessing import Imputer
from sklearn.pipeline import Pipeline
class MockClassifier(object):
"""Dummy classifier to test the cross-validation"""
def __init__(self, a=0, allow_nd=False):
self.a = a
self.allow_nd = allow_nd
def fit(self, X, Y=None, sample_weight=None, class_prior=None,
sparse_sample_weight=None, sparse_param=None, dummy_int=None,
dummy_str=None, dummy_obj=None, callback=None):
"""The dummy arguments are to test that this fit function can
accept non-array arguments through cross-validation, such as:
- int
- str (this is actually array-like)
- object
- function
"""
self.dummy_int = dummy_int
self.dummy_str = dummy_str
self.dummy_obj = dummy_obj
if callback is not None:
callback(self)
if self.allow_nd:
X = X.reshape(len(X), -1)
if X.ndim >= 3 and not self.allow_nd:
            raise ValueError('X cannot have more than 2 dimensions')
if sample_weight is not None:
assert_true(sample_weight.shape[0] == X.shape[0],
'MockClassifier extra fit_param sample_weight.shape[0]'
' is {0}, should be {1}'.format(sample_weight.shape[0],
X.shape[0]))
if class_prior is not None:
assert_true(class_prior.shape[0] == len(np.unique(y)),
'MockClassifier extra fit_param class_prior.shape[0]'
' is {0}, should be {1}'.format(class_prior.shape[0],
len(np.unique(y))))
if sparse_sample_weight is not None:
fmt = ('MockClassifier extra fit_param sparse_sample_weight'
'.shape[0] is {0}, should be {1}')
assert_true(sparse_sample_weight.shape[0] == X.shape[0],
fmt.format(sparse_sample_weight.shape[0], X.shape[0]))
if sparse_param is not None:
fmt = ('MockClassifier extra fit_param sparse_param.shape '
'is ({0}, {1}), should be ({2}, {3})')
assert_true(sparse_param.shape == P_sparse.shape,
fmt.format(sparse_param.shape[0],
sparse_param.shape[1],
P_sparse.shape[0], P_sparse.shape[1]))
return self
def predict(self, T):
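        # The mock "prediction" is simply the first feature column of T.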
if self.allow_nd:
T = T.reshape(len(T), -1)
return T[:, 0]
def score(self, X=None, Y=None):
return 1. / (1 + np.abs(self.a))
def get_params(self, deep=False):
return {'a': self.a, 'allow_nd': self.allow_nd}
X = np.ones((10, 2))
X_sparse = coo_matrix(X)
W_sparse = coo_matrix((np.array([1]), (np.array([1]), np.array([0]))),
shape=(10, 1))
P_sparse = coo_matrix(np.eye(5))
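# W_sparse and P_sparse are passed through fit_params (sparse_sample_weight and
# sparse_param) in test_cross_val_score_fit_params and checked inside
# MockClassifier.fit above.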
# avoid StratifiedKFold's Warning about least populated class in y
y = np.arange(10) % 3
##############################################################################
# Tests
def check_valid_split(train, test, n_samples=None):
# Use python sets to get more informative assertion failure messages
train, test = set(train), set(test)
# Train and test split should not overlap
assert_equal(train.intersection(test), set())
if n_samples is not None:
        # Check that the union of train and test splits covers all the indices
assert_equal(train.union(test), set(range(n_samples)))
def check_cv_coverage(cv, expected_n_iter=None, n_samples=None):
    # Check that all the samples appear at least once in a test fold
if expected_n_iter is not None:
assert_equal(len(cv), expected_n_iter)
else:
expected_n_iter = len(cv)
collected_test_samples = set()
iterations = 0
for train, test in cv:
check_valid_split(train, test, n_samples=n_samples)
iterations += 1
collected_test_samples.update(test)
# Check that the accumulated test samples cover the whole dataset
assert_equal(iterations, expected_n_iter)
if n_samples is not None:
assert_equal(collected_test_samples, set(range(n_samples)))
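# For example, check_cv_coverage(cval.KFold(10, 5), expected_n_iter=5,
# n_samples=10) verifies that each test fold is disjoint from its training set
# and that the five test folds together cover all ten indices.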
def test_kfold_valueerrors():
    # Check that errors are raised if there are not enough samples
assert_raises(ValueError, cval.KFold, 3, 4)
# Check that a warning is raised if the least populated class has too few
# members.
y = [3, 3, -1, -1, 3]
cv = assert_warns_message(Warning, "The least populated class",
cval.StratifiedKFold, y, 3)
    # Check that despite the warning the folds are still computed, even
    # though not all classes are necessarily represented on each side of
    # the split at each split
check_cv_coverage(cv, expected_n_iter=3, n_samples=len(y))
# Check that errors are raised if all n_labels for individual
# classes are less than n_folds.
y = [3, 3, -1, -1, 2]
assert_raises(ValueError, cval.StratifiedKFold, y, 3)
# Error when number of folds is <= 1
assert_raises(ValueError, cval.KFold, 2, 0)
assert_raises(ValueError, cval.KFold, 2, 1)
error_string = ("k-fold cross validation requires at least one"
" train / test split")
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 0)
assert_raise_message(ValueError, error_string,
cval.StratifiedKFold, y, 1)
# When n is not integer:
assert_raises(ValueError, cval.KFold, 2.5, 2)
# When n_folds is not integer:
assert_raises(ValueError, cval.KFold, 5, 1.5)
assert_raises(ValueError, cval.StratifiedKFold, y, 1.5)
def test_kfold_indices():
# Check all indices are returned in the test folds
kf = cval.KFold(300, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=300)
# Check all indices are returned in the test folds even when equal-sized
# folds are not possible
kf = cval.KFold(17, 3)
check_cv_coverage(kf, expected_n_iter=3, n_samples=17)
def test_kfold_no_shuffle():
# Manually check that KFold preserves the data ordering on toy datasets
splits = iter(cval.KFold(4, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1])
assert_array_equal(train, [2, 3])
train, test = next(splits)
assert_array_equal(test, [2, 3])
assert_array_equal(train, [0, 1])
splits = iter(cval.KFold(5, 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 2])
assert_array_equal(train, [3, 4])
train, test = next(splits)
assert_array_equal(test, [3, 4])
assert_array_equal(train, [0, 1, 2])
def test_stratified_kfold_no_shuffle():
# Manually check that StratifiedKFold preserves the data ordering as much
# as possible on toy datasets in order to avoid hiding sample dependencies
# when possible
splits = iter(cval.StratifiedKFold([1, 1, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 2])
assert_array_equal(train, [1, 3])
train, test = next(splits)
assert_array_equal(test, [1, 3])
assert_array_equal(train, [0, 2])
splits = iter(cval.StratifiedKFold([1, 1, 1, 0, 0, 0, 0], 2))
train, test = next(splits)
assert_array_equal(test, [0, 1, 3, 4])
assert_array_equal(train, [2, 5, 6])
train, test = next(splits)
assert_array_equal(test, [2, 5, 6])
assert_array_equal(train, [0, 1, 3, 4])
def test_stratified_kfold_ratios():
# Check that stratified kfold preserves label ratios in individual splits
# Repeat with shuffling turned off and on
n_samples = 1000
labels = np.array([4] * int(0.10 * n_samples) +
[0] * int(0.89 * n_samples) +
[1] * int(0.01 * n_samples))
for shuffle in [False, True]:
for train, test in cval.StratifiedKFold(labels, 5, shuffle=shuffle):
assert_almost_equal(np.sum(labels[train] == 4) / len(train), 0.10,
2)
assert_almost_equal(np.sum(labels[train] == 0) / len(train), 0.89,
2)
assert_almost_equal(np.sum(labels[train] == 1) / len(train), 0.01,
2)
assert_almost_equal(np.sum(labels[test] == 4) / len(test), 0.10, 2)
assert_almost_equal(np.sum(labels[test] == 0) / len(test), 0.89, 2)
assert_almost_equal(np.sum(labels[test] == 1) / len(test), 0.01, 2)
def test_kfold_balance():
# Check that KFold returns folds with balanced sizes
for kf in [cval.KFold(i, 5) for i in range(11, 17)]:
sizes = []
for _, test in kf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), kf.n)
def test_stratifiedkfold_balance():
    # Check that StratifiedKFold returns folds with balanced sizes (only when
# stratification is possible)
# Repeat with shuffling turned off and on
labels = [0] * 3 + [1] * 14
for shuffle in [False, True]:
for skf in [cval.StratifiedKFold(labels[:i], 3, shuffle=shuffle)
for i in range(11, 17)]:
sizes = []
for _, test in skf:
sizes.append(len(test))
assert_true((np.max(sizes) - np.min(sizes)) <= 1)
assert_equal(np.sum(sizes), skf.n)
def test_shuffle_kfold():
# Check the indices are shuffled properly, and that all indices are
# returned in the different test folds
kf = cval.KFold(300, 3, shuffle=True, random_state=0)
ind = np.arange(300)
all_folds = None
for train, test in kf:
assert_true(np.any(np.arange(100) != ind[test]))
assert_true(np.any(np.arange(100, 200) != ind[test]))
assert_true(np.any(np.arange(200, 300) != ind[test]))
if all_folds is None:
all_folds = ind[test].copy()
else:
all_folds = np.concatenate((all_folds, ind[test]))
all_folds.sort()
assert_array_equal(all_folds, ind)
def test_shuffle_stratifiedkfold():
# Check that shuffling is happening when requested, and for proper
# sample coverage
labels = [0] * 20 + [1] * 20
kf0 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=0))
kf1 = list(cval.StratifiedKFold(labels, 5, shuffle=True, random_state=1))
for (_, test0), (_, test1) in zip(kf0, kf1):
assert_true(set(test0) != set(test1))
check_cv_coverage(kf0, expected_n_iter=5, n_samples=40)
def test_kfold_can_detect_dependent_samples_on_digits(): # see #2372
# The digits samples are dependent: they are apparently grouped by authors
# although we don't have any information on the groups segment locations
    # for this data. We can highlight this fact by computing k-fold cross-
    # validation with and without shuffling: we observe that the shuffling case
    # wrongly makes the IID assumption and is therefore too optimistic: it
    # estimates a much higher accuracy (around 0.96) than the non-shuffling
    # variant (around 0.86).
digits = load_digits()
X, y = digits.data[:800], digits.target[:800]
model = SVC(C=10, gamma=0.005)
n = len(y)
cv = cval.KFold(n, 5, shuffle=False)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
# Shuffling the data artificially breaks the dependency and hides the
# overfitting of the model with regards to the writing style of the authors
# by yielding a seriously overestimated score:
cv = cval.KFold(n, 5, shuffle=True, random_state=0)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
cv = cval.KFold(n, 5, shuffle=True, random_state=1)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(mean_score, 0.95)
# Similarly, StratifiedKFold should try to shuffle the data as little
# as possible (while respecting the balanced class constraints)
# and thus be able to detect the dependency by not overestimating
# the CV score either. As the digits dataset is approximately balanced
# the estimated mean score is close to the score measured with
# non-shuffled KFold
cv = cval.StratifiedKFold(y, 5)
mean_score = cval.cross_val_score(model, X, y, cv=cv).mean()
assert_greater(0.88, mean_score)
assert_greater(mean_score, 0.85)
def test_label_kfold():
rng = np.random.RandomState(0)
# Parameters of the test
n_labels = 15
n_samples = 1000
n_folds = 5
# Construct the test data
tolerance = 0.05 * n_samples # 5 percent error allowed
labels = rng.randint(0, n_labels, n_samples)
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
labels = np.asarray(labels, dtype=object)
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Construct the test data
labels = ['Albert', 'Jean', 'Bertrand', 'Michel', 'Jean',
'Francis', 'Robert', 'Michel', 'Rachel', 'Lois',
'Michelle', 'Bernard', 'Marion', 'Laura', 'Jean',
'Rachel', 'Franck', 'John', 'Gael', 'Anna', 'Alix',
'Robert', 'Marion', 'David', 'Tony', 'Abel', 'Becky',
'Madmood', 'Cary', 'Mary', 'Alexandre', 'David', 'Francis',
'Barack', 'Abdoul', 'Rasha', 'Xi', 'Silvia']
labels = np.asarray(labels, dtype=object)
n_labels = len(np.unique(labels))
n_samples = len(labels)
n_folds = 5
tolerance = 0.05 * n_samples # 5 percent error allowed
folds = cval.LabelKFold(labels, n_folds=n_folds).idxs
ideal_n_labels_per_fold = n_samples // n_folds
# Check that folds have approximately the same size
assert_equal(len(folds), len(labels))
for i in np.unique(folds):
assert_greater_equal(tolerance,
abs(sum(folds == i) - ideal_n_labels_per_fold))
# Check that each label appears only in 1 fold
for label in np.unique(labels):
assert_equal(len(np.unique(folds[labels == label])), 1)
# Check that no label is on both sides of the split
for train, test in cval.LabelKFold(labels, n_folds=n_folds):
assert_equal(len(np.intersect1d(labels[train], labels[test])), 0)
# Should fail if there are more folds than labels
labels = np.array([1, 1, 1, 2, 2])
assert_raises(ValueError, cval.LabelKFold, labels, n_folds=3)
def test_shuffle_split():
ss1 = cval.ShuffleSplit(10, test_size=0.2, random_state=0)
ss2 = cval.ShuffleSplit(10, test_size=2, random_state=0)
ss3 = cval.ShuffleSplit(10, test_size=np.int32(2), random_state=0)
for typ in six.integer_types:
ss4 = cval.ShuffleSplit(10, test_size=typ(2), random_state=0)
for t1, t2, t3, t4 in zip(ss1, ss2, ss3, ss4):
assert_array_equal(t1[0], t2[0])
assert_array_equal(t2[0], t3[0])
assert_array_equal(t3[0], t4[0])
assert_array_equal(t1[1], t2[1])
assert_array_equal(t2[1], t3[1])
assert_array_equal(t3[1], t4[1])
def test_stratified_shuffle_split_init():
y = np.asarray([0, 1, 1, 1, 2, 2, 2])
# Check that error is raised if there is a class with only one sample
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.2)
# Check that error is raised if the test set size is smaller than n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 2)
# Check that error is raised if the train set size is smaller than
# n_classes
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 3, 2)
y = np.asarray([0, 0, 0, 1, 1, 1, 2, 2, 2])
# Check that errors are raised if there is not enough samples
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.5, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 8, 0.6)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, 3, 0.6, 8)
# Train size or test size too small
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, train_size=2)
assert_raises(ValueError, cval.StratifiedShuffleSplit, y, test_size=2)
def test_stratified_shuffle_split_iter():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
np.array([-1] * 800 + [1] * 50)
]
for y in ys:
sss = cval.StratifiedShuffleSplit(y, 6, test_size=0.33,
random_state=0)
for train, test in sss:
assert_array_equal(np.unique(y[train]), np.unique(y[test]))
# Checks if folds keep classes proportions
p_train = (np.bincount(np.unique(y[train], return_inverse=True)[1])
/ float(len(y[train])))
p_test = (np.bincount(np.unique(y[test], return_inverse=True)[1])
/ float(len(y[test])))
assert_array_almost_equal(p_train, p_test, 1)
assert_equal(y[train].size + y[test].size, y.size)
assert_array_equal(np.intersect1d(train, test), [])
def test_stratified_shuffle_split_even():
    # Test that StratifiedShuffleSplit draws indices with equal chance
n_folds = 5
n_iter = 1000
def assert_counts_are_ok(idx_counts, p):
# Here we test that the distribution of the counts
# per index is close enough to a binomial
threshold = 0.05 / n_splits
bf = stats.binom(n_splits, p)
for count in idx_counts:
p = bf.pmf(count)
assert_true(p > threshold,
"An index is not drawn with chance corresponding "
"to even draws")
for n_samples in (6, 22):
labels = np.array((n_samples // 2) * [0, 1])
splits = cval.StratifiedShuffleSplit(labels, n_iter=n_iter,
test_size=1. / n_folds,
random_state=0)
train_counts = [0] * n_samples
test_counts = [0] * n_samples
n_splits = 0
for train, test in splits:
n_splits += 1
for counter, ids in [(train_counts, train), (test_counts, test)]:
for id in ids:
counter[id] += 1
assert_equal(n_splits, n_iter)
assert_equal(len(train), splits.n_train)
assert_equal(len(test), splits.n_test)
assert_equal(len(set(train).intersection(test)), 0)
label_counts = np.unique(labels)
assert_equal(splits.test_size, 1.0 / n_folds)
assert_equal(splits.n_train + splits.n_test, len(labels))
assert_equal(len(label_counts), 2)
ex_test_p = float(splits.n_test) / n_samples
ex_train_p = float(splits.n_train) / n_samples
assert_counts_are_ok(train_counts, ex_train_p)
assert_counts_are_ok(test_counts, ex_test_p)
def test_stratified_shuffle_split_overlap_train_test_bug():
# See https://github.com/scikit-learn/scikit-learn/issues/6121 for
# the original bug report
labels = [0, 1, 2, 3] * 3 + [4, 5] * 5
splits = cval.StratifiedShuffleSplit(labels, n_iter=1,
test_size=0.5, random_state=0)
train, test = next(iter(splits))
assert_array_equal(np.intersect1d(train, test), [])
def test_predefinedsplit_with_kfold_split():
    # Check that PredefinedSplit can reproduce a split generated by KFold.
folds = -1 * np.ones(10)
kf_train = []
kf_test = []
for i, (train_ind, test_ind) in enumerate(cval.KFold(10, 5, shuffle=True)):
kf_train.append(train_ind)
kf_test.append(test_ind)
folds[test_ind] = i
ps_train = []
ps_test = []
ps = cval.PredefinedSplit(folds)
for train_ind, test_ind in ps:
ps_train.append(train_ind)
ps_test.append(test_ind)
assert_array_equal(ps_train, kf_train)
assert_array_equal(ps_test, kf_test)
def test_label_shuffle_split():
ys = [np.array([1, 1, 1, 1, 2, 2, 2, 3, 3, 3, 3, 3]),
np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3]),
np.array([0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]),
np.array([1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4]),
]
for y in ys:
n_iter = 6
test_size = 1. / 3
slo = cval.LabelShuffleSplit(y, n_iter, test_size=test_size,
random_state=0)
# Make sure the repr works
repr(slo)
# Test that the length is correct
assert_equal(len(slo), n_iter)
y_unique = np.unique(y)
for train, test in slo:
# First test: no train label is in the test set and vice versa
y_train_unique = np.unique(y[train])
y_test_unique = np.unique(y[test])
assert_false(np.any(np.in1d(y[train], y_test_unique)))
assert_false(np.any(np.in1d(y[test], y_train_unique)))
# Second test: train and test add up to all the data
assert_equal(y[train].size + y[test].size, y.size)
# Third test: train and test are disjoint
assert_array_equal(np.intersect1d(train, test), [])
# Fourth test: # unique train and test labels are correct,
# +- 1 for rounding error
assert_true(abs(len(y_test_unique) -
round(test_size * len(y_unique))) <= 1)
assert_true(abs(len(y_train_unique) -
round((1.0 - test_size) * len(y_unique))) <= 1)
def test_leave_label_out_changing_labels():
# Check that LeaveOneLabelOut and LeavePLabelOut work normally if
# the labels variable is changed before calling __iter__
labels = np.array([0, 1, 2, 1, 1, 2, 0, 0])
labels_changing = np.array(labels, copy=True)
lolo = cval.LeaveOneLabelOut(labels)
lolo_changing = cval.LeaveOneLabelOut(labels_changing)
lplo = cval.LeavePLabelOut(labels, p=2)
lplo_changing = cval.LeavePLabelOut(labels_changing, p=2)
labels_changing[:] = 0
for llo, llo_changing in [(lolo, lolo_changing), (lplo, lplo_changing)]:
for (train, test), (train_chan, test_chan) in zip(llo, llo_changing):
assert_array_equal(train, train_chan)
assert_array_equal(test, test_chan)
def test_cross_val_score():
clf = MockClassifier()
for a in range(-10, 10):
clf.a = a
# Smoke test
scores = cval.cross_val_score(clf, X, y)
assert_array_equal(scores, clf.score(X, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
scores = cval.cross_val_score(clf, X_sparse, y)
assert_array_equal(scores, clf.score(X_sparse, y))
# test with multioutput y
scores = cval.cross_val_score(clf, X_sparse, X)
assert_array_equal(scores, clf.score(X_sparse, X))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
scores = cval.cross_val_score(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
scores = cval.cross_val_score(clf, X, y.tolist())
assert_raises(ValueError, cval.cross_val_score, clf, X, y,
scoring="sklearn")
    # test with 3d X
X_3d = X[:, :, np.newaxis]
clf = MockClassifier(allow_nd=True)
scores = cval.cross_val_score(clf, X_3d, y)
clf = MockClassifier(allow_nd=False)
assert_raises(ValueError, cval.cross_val_score, clf, X_3d, y)
def test_cross_val_score_pandas():
# check cross_val_score doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_score(clf, X_df, y_ser)
def test_cross_val_score_mask():
# test that cross_val_score works with boolean masks
svm = SVC(kernel="linear")
iris = load_iris()
X, y = iris.data, iris.target
cv_indices = cval.KFold(len(y), 5)
scores_indices = cval.cross_val_score(svm, X, y, cv=cv_indices)
cv_indices = cval.KFold(len(y), 5)
cv_masks = []
for train, test in cv_indices:
mask_train = np.zeros(len(y), dtype=np.bool)
mask_test = np.zeros(len(y), dtype=np.bool)
mask_train[train] = 1
mask_test[test] = 1
        cv_masks.append((mask_train, mask_test))
scores_masks = cval.cross_val_score(svm, X, y, cv=cv_masks)
assert_array_equal(scores_indices, scores_masks)
def test_cross_val_score_precomputed():
# test for svm with precomputed kernel
svm = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
linear_kernel = np.dot(X, X.T)
score_precomputed = cval.cross_val_score(svm, linear_kernel, y)
svm = SVC(kernel="linear")
score_linear = cval.cross_val_score(svm, X, y)
assert_array_equal(score_precomputed, score_linear)
# Error raised for non-square X
svm = SVC(kernel="precomputed")
assert_raises(ValueError, cval.cross_val_score, svm, X, y)
# test error is raised when the precomputed kernel is not array-like
# or sparse
assert_raises(ValueError, cval.cross_val_score, svm,
linear_kernel.tolist(), y)
def test_cross_val_score_fit_params():
clf = MockClassifier()
n_samples = X.shape[0]
n_classes = len(np.unique(y))
DUMMY_INT = 42
DUMMY_STR = '42'
DUMMY_OBJ = object()
def assert_fit_params(clf):
# Function to test that the values are passed correctly to the
# classifier arguments for non-array type
assert_equal(clf.dummy_int, DUMMY_INT)
assert_equal(clf.dummy_str, DUMMY_STR)
assert_equal(clf.dummy_obj, DUMMY_OBJ)
fit_params = {'sample_weight': np.ones(n_samples),
'class_prior': np.ones(n_classes) / n_classes,
'sparse_sample_weight': W_sparse,
'sparse_param': P_sparse,
'dummy_int': DUMMY_INT,
'dummy_str': DUMMY_STR,
'dummy_obj': DUMMY_OBJ,
'callback': assert_fit_params}
cval.cross_val_score(clf, X, y, fit_params=fit_params)
def test_cross_val_score_score_func():
clf = MockClassifier()
_score_func_args = []
def score_func(y_test, y_predict):
_score_func_args.append((y_test, y_predict))
return 1.0
with warnings.catch_warnings(record=True):
scoring = make_scorer(score_func)
score = cval.cross_val_score(clf, X, y, scoring=scoring)
assert_array_equal(score, [1.0, 1.0, 1.0])
assert len(_score_func_args) == 3
def test_cross_val_score_errors():
class BrokenEstimator:
pass
assert_raises(TypeError, cval.cross_val_score, BrokenEstimator(), X)
def test_train_test_split_errors():
assert_raises(ValueError, cval.train_test_split)
assert_raises(ValueError, cval.train_test_split, range(3), train_size=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), test_size=0.6,
train_size=0.6)
assert_raises(ValueError, cval.train_test_split, range(3),
test_size=np.float32(0.6), train_size=np.float32(0.6))
assert_raises(ValueError, cval.train_test_split, range(3),
test_size="wrong_type")
assert_raises(ValueError, cval.train_test_split, range(3), test_size=2,
train_size=4)
assert_raises(TypeError, cval.train_test_split, range(3),
some_argument=1.1)
assert_raises(ValueError, cval.train_test_split, range(3), range(42))
def test_train_test_split():
X = np.arange(100).reshape((10, 10))
X_s = coo_matrix(X)
y = np.arange(10)
# simple test
split = cval.train_test_split(X, y, test_size=None, train_size=.5)
X_train, X_test, y_train, y_test = split
assert_equal(len(y_test), len(y_train))
# test correspondence of X and y
assert_array_equal(X_train[:, 0], y_train * 10)
assert_array_equal(X_test[:, 0], y_test * 10)
# conversion of lists to arrays (deprecated?)
with warnings.catch_warnings(record=True):
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_array_equal(X_train, X_s_train.toarray())
assert_array_equal(X_test, X_s_test.toarray())
# don't convert lists to anything else by default
split = cval.train_test_split(X, X_s, y.tolist())
X_train, X_test, X_s_train, X_s_test, y_train, y_test = split
assert_true(isinstance(y_train, list))
assert_true(isinstance(y_test, list))
# allow nd-arrays
X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2)
y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11)
split = cval.train_test_split(X_4d, y_3d)
assert_equal(split[0].shape, (7, 5, 3, 2))
assert_equal(split[1].shape, (3, 5, 3, 2))
assert_equal(split[2].shape, (7, 7, 11))
assert_equal(split[3].shape, (3, 7, 11))
# test stratification option
y = np.array([1, 1, 1, 1, 2, 2, 2, 2])
for test_size, exp_test_size in zip([2, 4, 0.25, 0.5, 0.75],
[2, 4, 2, 4, 6]):
train, test = cval.train_test_split(y,
test_size=test_size,
stratify=y,
random_state=0)
assert_equal(len(test), exp_test_size)
assert_equal(len(test) + len(train), len(y))
# check the 1:1 ratio of ones and twos in the data is preserved
assert_equal(np.sum(train == 1), np.sum(train == 2))
def train_test_split_pandas():
    # check train_test_split doesn't destroy pandas dataframe
types = [MockDataFrame]
try:
from pandas import DataFrame
types.append(DataFrame)
except ImportError:
pass
for InputFeatureType in types:
# X dataframe
X_df = InputFeatureType(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, InputFeatureType))
assert_true(isinstance(X_test, InputFeatureType))
def train_test_split_mock_pandas():
# X mock dataframe
X_df = MockDataFrame(X)
X_train, X_test = cval.train_test_split(X_df)
assert_true(isinstance(X_train, MockDataFrame))
assert_true(isinstance(X_test, MockDataFrame))
def test_cross_val_score_with_score_func_classification():
iris = load_iris()
clf = SVC(kernel='linear')
# Default score (should be the accuracy score)
scores = cval.cross_val_score(clf, iris.data, iris.target, cv=5)
assert_array_almost_equal(scores, [0.97, 1., 0.97, 0.97, 1.], 2)
# Correct classification score (aka. zero / one score) - should be the
# same as the default estimator score
zo_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="accuracy", cv=5)
assert_array_almost_equal(zo_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
    # F1 score (classes are balanced so f1_score should be equal to the
    # zero/one score)
f1_scores = cval.cross_val_score(clf, iris.data, iris.target,
scoring="f1_weighted", cv=5)
assert_array_almost_equal(f1_scores, [0.97, 1., 0.97, 0.97, 1.], 2)
def test_cross_val_score_with_score_func_regression():
X, y = make_regression(n_samples=30, n_features=20, n_informative=5,
random_state=0)
reg = Ridge()
# Default score of the Ridge regression estimator
scores = cval.cross_val_score(reg, X, y, cv=5)
assert_array_almost_equal(scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# R2 score (aka. determination coefficient) - should be the
# same as the default estimator score
r2_scores = cval.cross_val_score(reg, X, y, scoring="r2", cv=5)
assert_array_almost_equal(r2_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
# Mean squared error; this is a loss function, so "scores" are negative
mse_scores = cval.cross_val_score(reg, X, y, cv=5,
scoring="mean_squared_error")
expected_mse = np.array([-763.07, -553.16, -274.38, -273.26, -1681.99])
assert_array_almost_equal(mse_scores, expected_mse, 2)
# Explained variance
scoring = make_scorer(explained_variance_score)
ev_scores = cval.cross_val_score(reg, X, y, cv=5, scoring=scoring)
assert_array_almost_equal(ev_scores, [0.94, 0.97, 0.97, 0.99, 0.92], 2)
def test_permutation_score():
iris = load_iris()
X = iris.data
X_sparse = coo_matrix(X)
y = iris.target
svm = SVC(kernel='linear')
cv = cval.StratifiedKFold(y, 2)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_greater(score, 0.9)
assert_almost_equal(pvalue, 0.0, 1)
score_label, _, pvalue_label = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy",
labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# check that we obtain the same results with a sparse representation
svm_sparse = SVC(kernel='linear')
cv_sparse = cval.StratifiedKFold(y, 2)
score_label, _, pvalue_label = cval.permutation_test_score(
svm_sparse, X_sparse, y, n_permutations=30, cv=cv_sparse,
scoring="accuracy", labels=np.ones(y.size), random_state=0)
assert_true(score_label == score)
assert_true(pvalue_label == pvalue)
# test with custom scoring object
def custom_score(y_true, y_pred):
return (((y_true == y_pred).sum() - (y_true != y_pred).sum())
/ y_true.shape[0])
scorer = make_scorer(custom_score)
score, _, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=100, scoring=scorer, cv=cv, random_state=0)
assert_almost_equal(score, .93, 2)
assert_almost_equal(pvalue, 0.01, 3)
# set random y
y = np.mod(np.arange(len(y)), 3)
score, scores, pvalue = cval.permutation_test_score(
svm, X, y, n_permutations=30, cv=cv, scoring="accuracy")
assert_less(score, 0.5)
assert_greater(pvalue, 0.2)
def test_cross_val_generator_with_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
# explicitly passing indices value is deprecated
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
ss = cval.ShuffleSplit(2)
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
@ignore_warnings
def test_cross_val_generator_with_default_indices():
X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
y = np.array([1, 1, 2, 2])
labels = np.array([1, 2, 3, 4])
loo = cval.LeaveOneOut(4)
lpo = cval.LeavePOut(4, 2)
kf = cval.KFold(4, 2)
skf = cval.StratifiedKFold(y, 2)
lolo = cval.LeaveOneLabelOut(labels)
lopo = cval.LeavePLabelOut(labels, 2)
ss = cval.ShuffleSplit(2)
ps = cval.PredefinedSplit([1, 1, 2, 2])
for cv in [loo, lpo, kf, skf, lolo, lopo, ss, ps]:
for train, test in cv:
assert_not_equal(np.asarray(train).dtype.kind, 'b')
assert_not_equal(np.asarray(test).dtype.kind, 'b')
X[train], X[test]
y[train], y[test]
def test_shufflesplit_errors():
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=2.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=1.0)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=0.1,
train_size=0.95)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=11)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=10)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=8, train_size=3)
assert_raises(ValueError, cval.ShuffleSplit, 10, train_size=1j)
assert_raises(ValueError, cval.ShuffleSplit, 10, test_size=None,
train_size=None)
def test_shufflesplit_reproducible():
# Check that iterating twice on the ShuffleSplit gives the same
# sequence of train-test when the random_state is given
ss = cval.ShuffleSplit(10, random_state=21)
assert_array_equal(list(a for a, b in ss), list(a for a, b in ss))
def test_safe_split_with_precomputed_kernel():
clf = SVC()
clfp = SVC(kernel="precomputed")
iris = load_iris()
X, y = iris.data, iris.target
K = np.dot(X, X.T)
cv = cval.ShuffleSplit(X.shape[0], test_size=0.25, random_state=0)
tr, te = list(cv)[0]
X_tr, y_tr = cval._safe_split(clf, X, y, tr)
K_tr, y_tr2 = cval._safe_split(clfp, K, y, tr)
assert_array_almost_equal(K_tr, np.dot(X_tr, X_tr.T))
X_te, y_te = cval._safe_split(clf, X, y, te, tr)
K_te, y_te2 = cval._safe_split(clfp, K, y, te, tr)
assert_array_almost_equal(K_te, np.dot(X_te, X_tr.T))
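# Note: with a precomputed kernel, _safe_split slices the Gram matrix on both
# axes (rows by the requested indices, columns by the training indices), so
# the test block K_te equals np.dot(X_te, X_tr.T) rather than a square
# test-by-test matrix, matching what SVC(kernel="precomputed") expects at
# predict time.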
def test_cross_val_score_allow_nans():
# Check that cross_val_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.cross_val_score(p, X, y, cv=5)
def test_train_test_split_allow_nans():
# Check that train_test_split allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
cval.train_test_split(X, y, test_size=0.2, random_state=42)
def test_permutation_test_score_allow_nans():
# Check that permutation_test_score allows input data with NaNs
X = np.arange(200, dtype=np.float64).reshape(10, -1)
X[2, :] = np.nan
y = np.repeat([0, 1], X.shape[0] / 2)
p = Pipeline([
('imputer', Imputer(strategy='mean', missing_values='NaN')),
('classifier', MockClassifier()),
])
cval.permutation_test_score(p, X, y, cv=5)
def test_check_cv_return_types():
X = np.ones((9, 2))
cv = cval.check_cv(3, X, classifier=False)
assert_true(isinstance(cv, cval.KFold))
y_binary = np.array([0, 1, 0, 1, 0, 0, 1, 1, 1])
cv = cval.check_cv(3, X, y_binary, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
y_multiclass = np.array([0, 1, 0, 1, 2, 1, 2, 0, 2])
cv = cval.check_cv(3, X, y_multiclass, classifier=True)
assert_true(isinstance(cv, cval.StratifiedKFold))
X = np.ones((5, 2))
y_multilabel = [[1, 0, 1], [1, 1, 0], [0, 0, 0], [0, 1, 1], [1, 0, 0]]
cv = cval.check_cv(3, X, y_multilabel, classifier=True)
assert_true(isinstance(cv, cval.KFold))
y_multioutput = np.array([[1, 2], [0, 3], [0, 0], [3, 1], [2, 0]])
cv = cval.check_cv(3, X, y_multioutput, classifier=True)
assert_true(isinstance(cv, cval.KFold))
def test_cross_val_score_multilabel():
X = np.array([[-3, 4], [2, 4], [3, 3], [0, 2], [-3, 1],
[-2, 1], [0, 0], [-2, -1], [-1, -2], [1, -2]])
y = np.array([[1, 1], [0, 1], [0, 1], [0, 1], [1, 1],
[0, 1], [1, 0], [1, 1], [1, 0], [0, 0]])
clf = KNeighborsClassifier(n_neighbors=1)
scoring_micro = make_scorer(precision_score, average='micro')
scoring_macro = make_scorer(precision_score, average='macro')
scoring_samples = make_scorer(precision_score, average='samples')
score_micro = cval.cross_val_score(clf, X, y, scoring=scoring_micro, cv=5)
score_macro = cval.cross_val_score(clf, X, y, scoring=scoring_macro, cv=5)
score_samples = cval.cross_val_score(clf, X, y,
scoring=scoring_samples, cv=5)
assert_almost_equal(score_micro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 3])
assert_almost_equal(score_macro, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
assert_almost_equal(score_samples, [1, 1 / 2, 3 / 4, 1 / 2, 1 / 4])
def test_cross_val_predict():
boston = load_boston()
X, y = boston.data, boston.target
cv = cval.KFold(len(boston.target))
est = Ridge()
# Naive loop (should be same as cross_val_predict):
preds2 = np.zeros_like(y)
for train, test in cv:
est.fit(X[train], y[train])
preds2[test] = est.predict(X[test])
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_array_almost_equal(preds, preds2)
preds = cval.cross_val_predict(est, X, y)
assert_equal(len(preds), len(y))
cv = cval.LeaveOneOut(len(y))
preds = cval.cross_val_predict(est, X, y, cv=cv)
assert_equal(len(preds), len(y))
Xsp = X.copy()
Xsp *= (Xsp > np.median(Xsp))
Xsp = coo_matrix(Xsp)
preds = cval.cross_val_predict(est, Xsp, y)
assert_array_almost_equal(len(preds), len(y))
preds = cval.cross_val_predict(KMeans(), X)
assert_equal(len(preds), len(y))
def bad_cv():
for i in range(4):
yield np.array([0, 1, 2, 3]), np.array([4, 5, 6, 7, 8])
assert_raises(ValueError, cval.cross_val_predict, est, X, y, cv=bad_cv())
def test_cross_val_predict_input_types():
clf = Ridge()
# Smoke test
predictions = cval.cross_val_predict(clf, X, y)
assert_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_equal(predictions.shape, (10, 2))
predictions = cval.cross_val_predict(clf, X_sparse, y)
assert_array_equal(predictions.shape, (10,))
# test with multioutput y
with ignore_warnings(category=ConvergenceWarning):
predictions = cval.cross_val_predict(clf, X_sparse, X)
assert_array_equal(predictions.shape, (10, 2))
# test with X and y as list
list_check = lambda x: isinstance(x, list)
clf = CheckingClassifier(check_X=list_check)
predictions = cval.cross_val_predict(clf, X.tolist(), y.tolist())
clf = CheckingClassifier(check_y=list_check)
predictions = cval.cross_val_predict(clf, X, y.tolist())
# test with 3d X
X_3d = X[:, :, np.newaxis]
check_3d = lambda x: x.ndim == 3
clf = CheckingClassifier(check_X=check_3d)
predictions = cval.cross_val_predict(clf, X_3d, y)
assert_array_equal(predictions.shape, (10,))
def test_cross_val_predict_pandas():
# check cross_val_predict doesn't destroy pandas dataframe
types = [(MockDataFrame, MockDataFrame)]
try:
from pandas import Series, DataFrame
types.append((Series, DataFrame))
except ImportError:
pass
for TargetType, InputFeatureType in types:
# X dataframe, y series
X_df, y_ser = InputFeatureType(X), TargetType(y)
check_df = lambda x: isinstance(x, InputFeatureType)
check_series = lambda x: isinstance(x, TargetType)
clf = CheckingClassifier(check_X=check_df, check_y=check_series)
cval.cross_val_predict(clf, X_df, y_ser)
def test_sparse_fit_params():
iris = load_iris()
X, y = iris.data, iris.target
clf = MockClassifier()
fit_params = {'sparse_sample_weight': coo_matrix(np.eye(X.shape[0]))}
a = cval.cross_val_score(clf, X, y, fit_params=fit_params)
assert_array_equal(a, np.ones(3))
def test_check_is_partition():
p = np.arange(100)
assert_true(cval._check_is_partition(p, 100))
assert_false(cval._check_is_partition(np.delete(p, 23), 100))
p[0] = 23
assert_false(cval._check_is_partition(p, 100))
def test_cross_val_predict_sparse_prediction():
# check that cross_val_predict gives same result for sparse and dense input
X, y = make_multilabel_classification(n_classes=2, n_labels=1,
allow_unlabeled=False,
return_indicator=True,
random_state=1)
X_sparse = csr_matrix(X)
y_sparse = csr_matrix(y)
classif = OneVsRestClassifier(SVC(kernel='linear'))
preds = cval.cross_val_predict(classif, X, y, cv=10)
preds_sparse = cval.cross_val_predict(classif, X_sparse, y_sparse, cv=10)
preds_sparse = preds_sparse.toarray()
assert_array_almost_equal(preds_sparse, preds)
|
|
#!/usr/bin/env python3
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import unittest
import copy
import config
import thread_cert
from pktverify.consts import (
    WIRESHARK_OVERRIDE_PREFS, MLE_CHILD_UPDATE_REQUEST, MLE_CHILD_UPDATE_RESPONSE,
    MLE_CHILD_ID_REQUEST, MLE_CHILD_ID_RESPONSE, RESPONSE_TLV,
    LINK_LAYER_FRAME_COUNTER_TLV, MODE_TLV, TIMEOUT_TLV, VERSION_TLV,
    TLV_REQUEST_TLV, ADDRESS16_TLV, NETWORK_DATA_TLV, ROUTE64_TLV,
    CHALLENGE_TLV, SOURCE_ADDRESS_TLV, LEADER_DATA_TLV, ADDRESS_REGISTRATION_TLV)
from pktverify.packet_verifier import PacketVerifier
from pktverify.addrs import Ipv6Addr
LEADER = 1
ROUTER = 2
SED1 = 3
MED1 = 4
MTDS = [SED1, MED1]
PREFIX_2001 = '2001::/64'
PREFIX_2002 = '2002::/64'
# Test Purpose and Description:
# -----------------------------
# The purpose of this test case is to verify that the DUT, as a Border Router,
# acts properly as a Leader device in a Thread network, correctly sets the
# Network Data (stable/non-stable) and successfully propagates the Network Data
# to the devices that attach to it.
#
# Test Topology:
# -------------
# SED
# |
# ROUTER - Leader(DUT) - MED
#
# DUT Types:
# ----------
# Leader
class Cert_7_1_1_BorderRouterAsLeader(thread_cert.TestCase):
USE_MESSAGE_FACTORY = False
TOPOLOGY = {
LEADER: {
'name': 'LEADER',
'mode': 'rdn',
'panid': 0xface,
'allowlist': [ROUTER, SED1, MED1]
},
ROUTER: {
'name': 'ROUTER',
'mode': 'rdn',
'panid': 0xface,
'router_selection_jitter': 1,
'allowlist': [LEADER]
},
SED1: {
'name': 'SED',
'is_mtd': True,
'mode': '-',
'panid': 0xface,
'timeout': config.DEFAULT_CHILD_TIMEOUT,
'allowlist': [LEADER]
},
MED1: {
'name': 'MED',
'is_mtd': True,
'mode': 'rn',
'panid': 0xface,
'allowlist': [LEADER]
},
}
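# Note on the 'mode' strings above: 'r' requests rx-on-when-idle, 'd' marks a
# full Thread device and 'n' asks for the full Network Data, so 'rdn' suits
# router-capable nodes, 'rn' a MED that wants complete Network Data, and '-'
# a sleepy end device that only receives the stable subset, which is the
# behaviour verified in steps 3-8 below.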
# Override Wireshark preferences with the parameters this test case needs
CASE_WIRESHARK_PREFS = copy.deepcopy(WIRESHARK_OVERRIDE_PREFS)
CASE_WIRESHARK_PREFS['6lowpan.context1'] = PREFIX_2001
CASE_WIRESHARK_PREFS['6lowpan.context2'] = PREFIX_2002
def test(self):
self.nodes[LEADER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[LEADER].get_state(), 'leader')
self.nodes[LEADER].add_prefix(PREFIX_2001, 'paros')
self.nodes[LEADER].add_prefix(PREFIX_2002, 'paro')
self.nodes[LEADER].register_netdata()
self.nodes[ROUTER].start()
self.simulator.go(5)
self.assertEqual(self.nodes[ROUTER].get_state(), 'router')
self.nodes[SED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[SED1].get_state(), 'child')
self.nodes[MED1].start()
self.simulator.go(5)
self.assertEqual(self.nodes[MED1].get_state(), 'child')
def verify(self, pv):
pkts = pv.pkts
pv.summary.show()
LEADER = pv.vars['LEADER']
ROUTER = pv.vars['ROUTER']
MED = pv.vars['MED']
SED = pv.vars['SED']
# Step 1: The DUT forms the network properly and sends MLE Advertisements
pkts.filter_wpan_src64(LEADER).\
filter_mle_advertisement('Leader').\
must_next()
# Step 3: Router attaches to the Leader (DUT) and requests complete
# network data
pkts.filter_wpan_src64(ROUTER).\
filter_wpan_dst64(LEADER).\
filter_mle_cmd(MLE_CHILD_ID_REQUEST).\
filter(lambda p: {
RESPONSE_TLV,
LINK_LAYER_FRAME_COUNTER_TLV,
MODE_TLV,
TIMEOUT_TLV,
VERSION_TLV,
TLV_REQUEST_TLV,
ADDRESS16_TLV,
NETWORK_DATA_TLV,
ROUTE64_TLV
} < set(p.mle.tlv.type) and\
p.mle.tlv.mode.network_data == 1
).\
must_next()
# Step 4: The DUT MUST send an MLE Child ID Response to Router,
# including the following TLVs:
# - Network Data TLV
# At least two Prefix TLVs (Prefix 1 and Prefix 2),
# each including:
# - 6LoWPAN ID sub-TLV
# - Border Router sub-TLV
pkts.filter_wpan_src64(LEADER).\
filter_wpan_dst64(ROUTER).\
filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
filter(lambda p: {
Ipv6Addr(PREFIX_2001[:-3]),
Ipv6Addr(PREFIX_2002[:-3])
} == set(p.thread_nwd.tlv.prefix) and\
p.thread_nwd.tlv.border_router.flag.p == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.s == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.r == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.o == [1, 1] and\
p.thread_nwd.tlv.stable == [0, 1, 1, 1, 0, 0, 0]
).\
must_next()
# Step 5: SED attaches to the Leader (DUT) and requests only stable
# network data
pkts.filter_wpan_src64(SED).\
filter_wpan_dst64(LEADER).\
filter_mle_cmd(MLE_CHILD_ID_REQUEST).\
filter(lambda p: {
RESPONSE_TLV,
LINK_LAYER_FRAME_COUNTER_TLV,
MODE_TLV,
TIMEOUT_TLV,
VERSION_TLV,
TLV_REQUEST_TLV,
ADDRESS16_TLV,
NETWORK_DATA_TLV,
ADDRESS_REGISTRATION_TLV
} <= set(p.mle.tlv.type) and\
p.mle.tlv.mode.network_data == 0
).\
must_next()
# Step 6: The DUT MUST send an MLE Child ID Response to SED,
# including the following TLVs:
# - Network Data TLV
# At least one Prefix TLV (Prefix 1), including:
# - 6LoWPAN ID sub-TLV
# - Border Router sub-TLV
# - P_border_router_16 <0xFFFE>
# Prefix 2 TLV MUST NOT be included
pkts.filter_wpan_src64(LEADER).\
filter_wpan_dst64(SED).\
filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
filter(lambda p: {
MODE_TLV,
TIMEOUT_TLV,
CHALLENGE_TLV
} == set(p.thread_nwd.tlv.type) and\
[Ipv6Addr(PREFIX_2001[:-3])] == p.thread_nwd.tlv.prefix and\
p.thread_nwd.tlv.border_router.flag.p == [1] and\
p.thread_nwd.tlv.border_router.flag.s == [1] and\
p.thread_nwd.tlv.border_router.flag.r == [1] and\
p.thread_nwd.tlv.border_router.flag.o == [1] and\
p.thread_nwd.tlv.stable == [1, 1, 1]
).\
must_next()
lstart = pkts.index
# Step 7: MED attaches to the Leader (DUT) and requests complete
# network data
pkts.filter_wpan_src64(MED).\
filter_wpan_dst64(LEADER).\
filter_mle_cmd(MLE_CHILD_ID_REQUEST).\
filter(lambda p: {
RESPONSE_TLV,
LINK_LAYER_FRAME_COUNTER_TLV,
MODE_TLV,
TIMEOUT_TLV,
VERSION_TLV,
TLV_REQUEST_TLV,
ADDRESS16_TLV,
NETWORK_DATA_TLV,
ADDRESS_REGISTRATION_TLV
} < set(p.mle.tlv.type) and\
p.mle.tlv.mode.network_data == 1
).\
must_next()
# Step 8: The DUT MUST send an MLE Child ID Response to MED,
# including the following TLVs:
# - Network Data TLV
# At least two Prefix TLVs (Prefix 1 and Prefix 2),
# each including:
# - 6LoWPAN ID sub-TLV
# - Border Router sub-TLV
pkts.filter_wpan_src64(LEADER).\
filter_wpan_dst64(MED).\
filter_mle_cmd(MLE_CHILD_ID_RESPONSE).\
filter(lambda p: {
Ipv6Addr(PREFIX_2001[:-3]),
Ipv6Addr(PREFIX_2002[:-3])
} == set(p.thread_nwd.tlv.prefix) and\
p.thread_nwd.tlv.border_router.flag.p == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.s == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.r == [1, 1] and\
p.thread_nwd.tlv.border_router.flag.o == [1, 1] and\
p.thread_nwd.tlv.stable == [0, 1, 1, 1, 0, 0, 0]
).\
must_next()
# Step 9: After attaching, each Child automatically sends its configured
# global address to the Leader in the Address Registration TLV of the
# Child Update Request command
# Step 10: The DUT MUST send an MLE Child Update Response to each of MED & SED
# The following TLVs MUST be present in the Child Update Response:
# - Source Address TLV
# - Address Registration TLV
# - Echoes back addresses configured in step 9
# - Mode TLV
for child in (SED, MED):
_pkt = pkts.range(lstart).\
filter_wpan_src64(child).\
filter_wpan_dst64(LEADER).\
filter_mle_cmd(MLE_CHILD_UPDATE_REQUEST).\
must_next()
pkts.range(lstart).\
filter_wpan_src64(LEADER).\
filter_wpan_dst64(child).\
filter_mle_cmd(MLE_CHILD_UPDATE_RESPONSE).\
filter(lambda p: {
SOURCE_ADDRESS_TLV,
MODE_TLV,
ADDRESS_REGISTRATION_TLV
} < set(p.mle.tlv.type) and\
set(p.mle.tlv.addr_reg_iid) < set(_pkt.mle.tlv.addr_reg_iid)
).\
must_next()
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
from experiments.nmt import\
RNNEncoderDecoder,\
parse_input,\
get_batch_iterator,\
prototype_state
logger = logging.getLogger(__name__)
class BatchTxtIterator(object):
def __init__(self, state, txt, indx, batch_size, raise_unk, unk_sym=-1, null_sym=-1):
self.__dict__.update(locals())
self.__dict__.pop('self')
def start(self):
self.txt_file = open(self.txt)
def _pack(self, seqs):
num = len(seqs)
max_len = max(map(len, seqs))
x = numpy.zeros((num, max_len), dtype="int64")
x_mask = numpy.zeros((num, max_len), dtype="float32")
for i, seq in enumerate(seqs):
x[i, :len(seq)] = seq
x_mask[i, :len(seq)] = 1.0
return x.T, x_mask.T
def __iter__(self):
return self
def next(self):
seqs = []
try:
while len(seqs) < self.batch_size:
line = next(self.txt_file).strip()
seq, _ = parse_input(self.state, self.indx, line, raise_unk=self.raise_unk,
unk_sym=self.unk_sym, null_sym=self.null_sym)
seqs.append(seq)
return self._pack(seqs)
except StopIteration:
if not seqs:
raise StopIteration()
return self._pack(seqs)
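# Minimal illustrative sketch of the layout produced by _pack above. The
# helper below is hypothetical and is never called by this script; _pack only
# relies on numpy, so bypassing __init__ is safe for the demonstration.
def _pack_layout_example():
    it = BatchTxtIterator.__new__(BatchTxtIterator)
    x, x_mask = it._pack([[5, 7, 2], [9, 3]])
    # Time steps run down the rows and each column is one sentence; padded
    # positions hold 0 with a mask entry of 0.0.
    assert x.tolist() == [[5, 9], [7, 3], [2, 0]]
    assert x_mask.tolist() == [[1.0, 1.0], [1.0, 1.0], [1.0, 0.0]]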
class BatchBiTxtIterator(object):
def __init__(self, state, src, indx_src, trg, indx_trg, batch_size, raise_unk):
self.__dict__.update(locals())
self.__dict__.pop('self')
self.src_iter = BatchTxtIterator(state, src, indx_src, batch_size, raise_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
self.trg_iter = BatchTxtIterator(state, trg, indx_trg, batch_size, raise_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
def start(self):
self.src_iter.start()
self.trg_iter.start()
def __iter__(self):
return self
def next(self):
x, x_mask = next(self.src_iter)
y, y_mask = next(self.trg_iter)
assert x.shape[1] == y.shape[1]
return dict(x=x, x_mask=x_mask, y=y, y_mask=y_mask)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--state", required=True, help="State to use")
# Paths
parser.add_argument("--src", help="Source phrases")
parser.add_argument("--trg", help="Target phrases")
parser.add_argument("--scores", default=None, help="Save scores to")
parser.add_argument("model_path", help="Path to the model")
# Options
parser.add_argument("--print-probs", default=False, action="store_true",
help="Print probs instead of log probs")
parser.add_argument("--allow-unk", default=False, action="store_true",
help="Allow unknown words in the input")
parser.add_argument("--mode", default="interact",
help="Processing mode, one of 'batch', 'txt', 'interact'")
parser.add_argument("--n-batches", default=-1, type=int,
help="Score only first n batches")
parser.add_argument("--verbose", default=False, action="store_true",
help="Print more stuff")
parser.add_argument("--y-noise", type=float,
help="Probability for a word to be replaced by a random word")
# Additional arguments
parser.add_argument("changes", nargs="?", help="Changes to state", default="")
return parser.parse_args()
def main():
args = parse_args()
state = prototype_state()
with open(args.state) as src:
state.update(cPickle.load(src))
state.update(eval("dict({})".format(args.changes)))
state['sort_k_batches'] = 1 # which means don't sort
state['shuffle'] = False
state['use_infinite_loop'] = False
state['force_enc_repr_cpu'] = False
logging.basicConfig(level=getattr(logging, state['level']), format="%(asctime)s: %(name)s: %(levelname)s: %(message)s")
rng = numpy.random.RandomState(state['seed'])
enc_dec = RNNEncoderDecoder(state, rng, skip_init=True, compute_alignment=True)
enc_dec.build()
lm_model = enc_dec.create_lm_model()
lm_model.load(args.model_path)
indx_word_src = cPickle.load(open(state['word_indx'],'rb'))
indx_word_trgt = cPickle.load(open(state['word_indx_trgt'], 'rb'))
if args.mode == "batch":
data_given = args.src or args.trg
txt = data_given and not (args.src.endswith(".h5") and args.trg.endswith(".h5"))
if data_given and not txt:
state['source'] = [args.src]
state['target'] = [args.trg]
if not data_given and not txt:
logger.info("Using the training data")
if txt:
data_iter = BatchBiTxtIterator(state,
args.src, indx_word_src, args.trg, indx_word_trgt,
state['bs'], raise_unk=not args.allow_unk)
data_iter.start()
else:
data_iter = get_batch_iterator(state)
data_iter.start(0)
score_file = open(args.scores, "w") if args.scores else sys.stdout
scorer = enc_dec.create_scorer(batch=True)
count = 0
n_samples = 0
logger.info('Scoring phrases')
for i, batch in enumerate(data_iter):
if batch is None:
continue
if args.n_batches >= 0 and i == args.n_batches:
break
if args.y_noise:
y = batch['y']
random_words = numpy.random.randint(0, 100, y.shape).astype("int64")
change_mask = numpy.random.binomial(1, args.y_noise, y.shape).astype("int64")
y = change_mask * random_words + (1 - change_mask) * y
batch['y'] = y
st = time.time()
[scores] = scorer(batch['x'], batch['y'],
batch['x_mask'], batch['y_mask'])
if args.print_probs:
scores = numpy.exp(scores)
up_time = time.time() - st
for s in scores:
print >>score_file, "{:.5e}".format(float(s))
n_samples += batch['x'].shape[1]
count += 1
if count % 100 == 0:
score_file.flush()
logger.debug("Scores flushed")
logger.debug("{} batches, {} samples, {} per sample; example scores: {}".format(
count, n_samples, up_time/scores.shape[0], scores[:5]))
logger.info("Done")
score_file.flush()
elif args.mode == "interact":
scorer = enc_dec.create_scorer()
while True:
try:
compute_probs = enc_dec.create_probs_computer()
src_line = raw_input('Source sequence: ')
trgt_line = raw_input('Target sequence: ')
src_seq = parse_input(state, indx_word_src, src_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
trgt_seq = parse_input(state, indx_word_trgt, trgt_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
print "Binarized source: ", src_seq
print "Binarized target: ", trgt_seq
probs = compute_probs(src_seq, trgt_seq)
print "Probs: {}, cost: {}".format(probs, -numpy.sum(numpy.log(probs)))
except Exception:
traceback.print_exc()
elif args.mode == "txt":
assert args.src and args.trg
scorer = enc_dec.create_scorer()
src_file = open(args.src, "r")
trg_file = open(args.trg, "r")
compute_probs = enc_dec.create_probs_computer(return_alignment=True)
try:
numpy.set_printoptions(precision=3, linewidth=150, suppress=True)
i = 0
while True:
src_line = next(src_file).strip()
trgt_line = next(trg_file).strip()
src_seq, src_words = parse_input(state,
indx_word_src, src_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_source'], null_sym=state['null_sym_source'])
trgt_seq, trgt_words = parse_input(state,
indx_word_trgt, trgt_line, raise_unk=not args.allow_unk,
unk_sym=state['unk_sym_target'], null_sym=state['null_sym_target'])
probs, alignment = compute_probs(src_seq, trgt_seq)
if args.verbose:
print "Probs: ", probs.flatten()
if alignment.ndim == 3:
print "Alignment:".ljust(20), src_line, "<eos>"
for i, word in enumerate(trgt_words):
print "{}{}".format(word.ljust(20), alignment[i, :, 0])
print "Generated by:"
for i, word in enumerate(trgt_words):
j = numpy.argmax(alignment[i, :, 0])
print "{} <--- {}".format(word,
src_words[j] if j < len(src_words) else "<eos>")
i += 1
if i % 100 == 0:
sys.stdout.flush()
logger.debug(i)
print -numpy.sum(numpy.log(probs))
except StopIteration:
pass
else:
raise Exception("Unknown mode {}".format(args.mode))
if __name__ == "__main__":
main()
|
|
from __future__ import unicode_literals, print_function, division
from pcapparser.printer import HttpPrinter
__author__ = "dongliu"
import sys
import argparse
import socket
import select
import threading
import signal
from pcapparser.httpparser import HttpType, HttpParser
from pcapparser import config
_BUF_SIZE = 8192
_MAX_READ_RETRY_COUNT = 20
_READ_TIMEOUT = 3
class ConnectionHandler(object):
"""handle one connection from client"""
def __init__(self, client_socket):
self.client_socket = client_socket
self.first_data = b''
self.http_type = HttpType.REQUEST
self.remote_host = None
self.path = None
self.method = None
self.protocol = None
self.target_socket = None
def init_connect(self):
end = -1
while True:
self.first_data += self.client_socket.recv(_BUF_SIZE)
end = self.first_data.find(b'\n')
if end != -1:
break
self.method, self.path, self.protocol = self.first_data[:end + 1].split()
if self.method == b'CONNECT':
self.first_data = self.first_data[end + 1:]
self._method_connect()
elif self.method in (b'OPTIONS', b'GET', b'HEAD', b'POST', b'PUT', b'DELETE', b'TRACE'):
self._method_others()
def close(self):
self.client_socket.close()
self.target_socket.close()
def _method_connect(self):
"""for http proxy connect method. it is usually for https proxy"""
self._connect_target(self.path)
self.client_socket.send(
b'HTTP/1.1 200 Connection established\nProxy-agent: Python Proxy\n\n')
def _method_others(self):
self.path = self.path[len(b'http://'):]
i = self.path.find(b'/')
if i > 0:
host = self.path[:i]
else:
host = self.path
self._connect_target(host)
def _connect_target(self, host):
i = host.find(b':')
if i != -1:
port_str = host[i + 1:]
if port_str:
port = int(port_str)
else:
port = 80
host = host[:i]
else:
port = 80
(soc_family, _, _, _, address) = socket.getaddrinfo(host, port)[0]
self.remote_host = address
self.target_socket = socket.socket(soc_family)
self.target_socket.connect(address)
def proxy_data(self, http_parser):
"""run the proxy"""
self.target_socket.send(self.first_data)
http_parser.send(HttpType.REQUEST, self.first_data)
sockets = [self.client_socket, self.target_socket]
empty_read_count = 0
while True:
empty_read_count += 1
(data, _, error) = select.select(sockets, [], sockets, _READ_TIMEOUT)
if error:
# connection closed, or error occurred.
break
if not data:
continue
for in_ in data:
try:
data = in_.recv(_BUF_SIZE)
except ConnectionResetError:
break
out = self.target_socket if in_ is self.client_socket else self.client_socket
http_type = HttpType.REQUEST if in_ is self.client_socket else HttpType.RESPONSE
if data:
out.send(data)
empty_read_count = 0
http_parser.send(http_type, data)
if empty_read_count == _MAX_READ_RETRY_COUNT:
break
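# Note: proxy_data is a plain select() relay. Whatever becomes readable on
# either socket is forwarded verbatim to the peer socket and mirrored into
# http_parser for logging; the loop stops after _MAX_READ_RETRY_COUNT
# consecutive iterations without payload (timeouts or empty reads) or when
# select() reports an error on either socket.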
def _worker(worker_socket, client_ip, client_port, output_file):
try:
handler = ConnectionHandler(worker_socket)
handler.init_connect()
processor = HttpPrinter((client_ip, client_port), handler.remote_host)
http_parser = HttpParser(processor)
handler.proxy_data(http_parser)
handler.close()
http_parser.finish()
except Exception:
import traceback
traceback.print_exc()
def start_server(host='0.0.0.0', port=8000, IPv6=False, output=None):
"""start proxy server."""
ip_version = socket.AF_INET6 if IPv6 else socket.AF_INET
server_socket = socket.socket(ip_version)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
server_socket.bind((host, port))
except Exception as e:
print(e)
sys.exit(-1)
print("Proxy start on %s:%d" % (host, port))
server_socket.listen(0)
output_file = open(output, "w+") if output else sys.stdout
config.out = output_file
def clean():
"""do clean job after process terminated"""
try:
server_socket.close()
except:
pass
try:
output_file.close()
except:
pass
# Stop the proxy when Ctrl+C is pressed.
def signal_handler(signal, frame):
print('\nStopping proxy...')
clean()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
try:
while True:
worker_socket, client = server_socket.accept()
(client_ip, client_port) = client
worker_thread = threading.Thread(
target=_worker,
args=(worker_socket, client_ip, client_port, output_file)
)
worker_thread.setDaemon(True)
worker_thread.start()
finally:
clean()
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-l", "--listen",
help="the IP of the interface which the proxy listened on")
parser.add_argument("-p", "--port", type=int,
help="the port of the interface which the proxy listened on")
parser.add_argument("-6", "--ipv6", help="use ipv6", action="store_true")
parser.add_argument("-v", "--verbosity", help="increase output verbosity(-vv is recommended)",
action="count")
parser.add_argument("-g", "--group", help="group http request/response by connection",
action="store_true")
parser.add_argument("-o", "--output", help="output to file instead of stdout")
parser.add_argument("-e", "--encoding", help="decode the data use specified encodings.")
parser.add_argument("-b", "--beauty", help="output json in a pretty way.", action="store_true")
args = parser.parse_args()
setting = {"IPv6": args.ipv6}
if args.listen:
setting["host"] = args.listen
if args.port:
setting["port"] = args.port
if args.output:
setting["output"] = args.output
# output config
parse_config = config.get_config()
if args.verbosity:
parse_config.level = args.verbosity
if args.encoding:
parse_config.encoding = args.encoding
parse_config.pretty = args.beauty
parse_config.group = args.group
start_server(**setting)
if __name__ == '__main__':
main()
|
|
''' Support functions for changewithin.py script.
'''
import time, json, requests, os, sys
import urllib
from lxml import etree
from sets import Set
from ModestMaps.Geo import MercatorProjection, Location, Coordinate
from tempfile import mkstemp
dir_path = os.path.dirname(os.path.abspath(__file__))
def get_state():
r = requests.get('http://planet.openstreetmap.org/replication/day/state.txt')
return r.text.split('\n')[1].split('=')[1]
def get_osc(stateurl=None):
if not stateurl:
state = get_state()
# zero-pad state so it can be safely split.
state = '000000000' + state
path = '%s/%s/%s' % (state[-9:-6], state[-6:-3], state[-3:])
stateurl = 'http://planet.openstreetmap.org/replication/day/%s.osc.gz' % path
sys.stderr.write('downloading %s...\n' % stateurl)
# prepare a local file to store changes
handle, filename = mkstemp(prefix='change-', suffix='.osc.gz')
os.close(handle)
status = os.system('wget --quiet %s -O %s' % (stateurl, filename))
if status:
status = os.system('curl --silent %s -o %s' % (stateurl, filename))
if status:
raise Exception('Failure from both wget and curl')
sys.stderr.write('extracting %s...\n' % filename)
os.system('gunzip -f %s' % filename)
# knock off the ".gz" suffix and return
return filename[:-3]
# Returns -lon, -lat, +lon, +lat
#
# +---[+lat]---+
# | |
# [-lon] [+lon]
# | |
# +---[-lat]---+
def get_bbox(poly):
box = [200, 200, -200, -200]
for p in poly:
if p[0] < box[0]: box[0] = p[0]
if p[0] > box[2]: box[2] = p[0]
if p[1] < box[1]: box[1] = p[1]
if p[1] > box[3]: box[3] = p[1]
return box
def point_in_box(x, y, box):
return x > box[0] and x < box[2] and y > box[1] and y < box[3]
def point_in_poly(x, y, poly):
n = len(poly)
inside = False
p1x, p1y = poly[0]
for i in xrange(n + 1):
p2x, p2y = poly[i % n]
if y > min(p1y, p2y):
if y <= max(p1y, p2y):
if x <= max(p1x, p2x):
if p1y != p2y:
xints = (y - p1y) * (p2x - p1x) / (p2y - p1y) + p1x
if p1x == p2x or x <= xints:
inside = not inside
p1x, p1y = p2x, p2y
return inside
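# Note: point_in_poly is the classic even-odd ray-casting test. A horizontal
# ray cast from (x, y) towards +x toggles `inside` at every polygon edge it
# crosses, so an odd number of crossings means the point lies inside. For the
# unit square [(0, 0), (1, 0), (1, 1), (0, 1)]:
#   point_in_poly(0.5, 0.5, square) -> True
#   point_in_poly(1.5, 0.5, square) -> False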
def get_extent(gjson):
extent = {}
m = MercatorProjection(0)
b = get_bbox(extract_coords(gjson))
points = [[b[3], b[0]], [b[1], b[2]]]
if (points[0][0] - points[1][0] == 0) or (points[1][1] - points[0][1] == 0):
extent['lat'] = points[0][0]
extent['lon'] = points[1][1]
extent['zoom'] = 18
else:
i = float('inf')
w = 800
h = 600
tl = [min(map(lambda x: x[0], points)), min(map(lambda x: x[1], points))]
br = [max(map(lambda x: x[0], points)), max(map(lambda x: x[1], points))]
c1 = m.locationCoordinate(Location(tl[0], tl[1]))
c2 = m.locationCoordinate(Location(br[0], br[1]))
while (abs(c1.column - c2.column) * 256.0) < w and (abs(c1.row - c2.row) * 256.0) < h:
c1 = c1.zoomBy(1)
c2 = c2.zoomBy(1)
center = m.coordinateLocation(Coordinate(
(c1.row + c2.row) / 2,
(c1.column + c2.column) / 2,
c1.zoom))
extent['lat'] = center.lat
extent['lon'] = center.lon
if c1.zoom > 18:
extent['zoom'] = 18
else:
extent['zoom'] = c1.zoom
return extent
def has_building_tag(n):
return n.find(".//tag[@k='building']") is not None
def get_address_tags(tags):
addr_tags = []
for t in tags:
key = t.get('k')
if key.split(':')[0] == 'addr':
addr_tags.append(t.attrib)
return addr_tags
def has_address_change(gid, addr, version, elem):
url = 'http://api.openstreetmap.org/api/0.6/%s/%s/history' % (elem, gid)
r = requests.get(url)
if not r.text: return False
e = etree.fromstring(r.text.encode('utf-8'))
previous_elem = e.find(".//%s[@version='%s']" % (elem, (version - 1)))
previous_addr = get_address_tags(previous_elem.findall(".//tag[@k]"))
if len(addr) != len(previous_addr):
return True
else:
for a in addr:
if a not in previous_addr: return True
return False
def load_changeset(changeset):
changeset['wids'] = list(changeset['wids'])
changeset['nids'] = changeset['nodes'].keys()
changeset['addr_chg_nids'] = changeset['addr_chg_nd'].keys()
changeset['addr_chg_way'] = list(changeset['addr_chg_way'])
points = map(get_point, changeset['nodes'].values())
polygons = map(get_polygon, changeset['wids'])
gjson = geojson_feature_collection(points=points, polygons=polygons)
extent = get_extent(gjson)
url = 'http://api.openstreetmap.org/api/0.6/changeset/%s' % changeset['id']
r = requests.get(url)
if not r.text: return changeset
t = etree.fromstring(r.text.encode('utf-8'))
changeset['details'] = dict(t.find('.//changeset').attrib)
comment = t.find(".//tag[@k='comment']")
created_by = t.find(".//tag[@k='created_by']")
if comment is not None: changeset['comment'] = comment.get('v')
if created_by is not None: changeset['created_by'] = created_by.get('v')
changeset['map_img'] = 'http://api.tiles.mapbox.com/v3/lxbarth.map-lxoorpwz/geojson(%s)/%s,%s,%s/600x400.png' % (urllib.quote(json.dumps(gjson)), extent['lon'], extent['lat'], extent['zoom'])
if len(changeset['map_img']) > 2048:
changeset['map_img'] = 'http://api.tiles.mapbox.com/v3/lxbarth.map-lxoorpwz/geojson(%s)/%s,%s,%s/600x400.png' % (urllib.quote(json.dumps(bbox_from_geojson(gjson))), extent['lon'], extent['lat'], extent['zoom'])
changeset['map_link'] = 'http://www.openstreetmap.org/?lat=%s&lon=%s&zoom=%s&layers=M' % (extent['lat'], extent['lon'], extent['zoom'])
changeset['addr_count'] = len(changeset['addr_chg_way']) + len(changeset['addr_chg_nids'])
changeset['bldg_count'] = len(changeset['wids'])
return changeset
def add_changeset(el, cid, changesets):
if not changesets.get(cid, False):
changesets[cid] = {
'id': cid,
'user': el.get('user'),
'uid': el.get('uid'),
'wids': set(),
'nodes': {},
'addr_chg_way': set(),
'addr_chg_nd': {}
}
def add_node(el, nid, nodes):
if not nodes.get(nid, False):
nodes[nid] = {
'id': nid,
'lat': float(el.get('lat')),
'lon': float(el.get('lon'))
}
def geojson_multi_point(coords):
return {
"type": "Feature",
"properties": {},
"geometry": {
"type": "MultiPoint",
"coordinates": coords
}
}
def geojson_polygon(coords):
return {
"type": "Feature",
"properties": {},
"geometry": {
"type": "Polygon",
"coordinates": coords
}
}
def extract_coords(gjson):
coords = []
for f in gjson['features']:
if f['geometry']['type'] == 'Polygon':
for c in f['geometry']['coordinates']:
coords.extend(c)
elif f['geometry']['type'] == 'MultiPoint':
coords.extend(f['geometry']['coordinates'])
elif f['geometry']['type'] == 'Point':
coords.append(f['geometry']['coordinates'])
return coords
def bbox_from_geojson(gjson):
b = get_bbox(extract_coords(gjson))
return geojson_polygon([[[b[0], b[1]], [b[0], b[3]], [b[2], b[3]], [b[2], b[1]], [b[0], b[1]]]])
def get_polygon(wid):
coords = []
query = '''
[out:xml][timeout:25];
(
way(%s);
);
out body;
>;
out skel qt;
'''
r = requests.post('http://overpass-api.de/api/interpreter', data=(query % wid))
if not r.text: return coords
e = etree.fromstring(r.text.encode('utf-8'))
lookup = {}
for n in e.findall(".//node"):
lookup[n.get('id')] = [float(n.get('lon')), float(n.get('lat'))]
for n in e.findall(".//nd"):
if n.get('ref') in lookup:
coords.append(lookup[n.get('ref')])
return coords
def get_point(node):
return [node["lon"], node["lat"]]
def geojson_feature_collection(points=[], polygons=[]):
collection = {"type": "FeatureCollection", "features": []}
if len(points):
collection["features"].append(geojson_multi_point(points))
for p in polygons:
if len(p):
collection["features"].append(geojson_polygon([p]))
return collection
#
# Templates for generated emails.
#
html_tmpl = '''
<div style='font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;color:#333;max-width:600px;'>
<p style='float:right;'>{{date}}</p>
<h1 style='margin-bottom:10px;'>Summary</h1>
{{#stats}}
<ul style='font-size:15px;line-height:17px;list-style:none;margin-left:0;padding-left:0;'>
<li>Total changesets: <strong>{{total}}</strong></li>
<li>Total address changes: <strong>{{addresses}}</strong></li>
<li>Total building footprint changes: <strong>{{buildings}}</strong></li>
</ul>
{{#limit_exceed}}
<p style='font-size:13px;font-style:italic;'>{{limit_exceed}}</p>
{{/limit_exceed}}
{{/stats}}
{{#changesets}}
<h2 style='border-bottom:1px solid #ddd;padding-top:15px;padding-bottom:8px;'>Changeset <a href='http://openstreetmap.org/browse/changeset/{{id}}' style='text-decoration:none;color:#3879D9;'>#{{id}}</a></h2>
<p style='font-size:14px;line-height:17px;margin-bottom:20px;'>
<a href='http://openstreetmap.org/user/{{#details}}{{user}}{{/details}}' style='text-decoration:none;color:#3879D9;font-weight:bold;'>{{#details}}{{user}}{{/details}}</a>: {{comment}}
</p>
<p style='font-size:14px;line-height:17px;margin-bottom:0;'>
{{#bldg_count}}Changed buildings ({{bldg_count}}): {{#wids}}<a href='http://openstreetmap.org/browse/way/{{.}}/history' style='text-decoration:none;color:#3879D9;'>#{{.}}</a> {{/wids}}{{/bldg_count}}
</p>
<p style='font-size:14px;line-height:17px;margin-top:5px;margin-bottom:20px;'>
{{#addr_count}}Changed addresses ({{addr_count}}): {{#addr_chg_nids}}<a href='http://openstreetmap.org/browse/node/{{.}}/history' style='text-decoration:none;color:#3879D9;'>#{{.}}</a> {{/addr_chg_nids}}{{#addr_chg_way}}<a href='http://openstreetmap.org/browse/way/{{.}}/history' style='text-decoration:none;color:#3879D9;'>#{{.}}</a> {{/addr_chg_way}}{{/addr_count}}
</p>
<a href='{{map_link}}'><img src='{{map_img}}' style='border:1px solid #ddd;' /></a>
{{/changesets}}
</div>
'''
text_tmpl = '''
### Summary ###
{{date}}
{{#stats}}
Total changesets: {{total}}
Total building footprint changes: {{buildings}}
Total address changes: {{addresses}}
{{#limit_exceed}}
{{limit_exceed}}
{{/limit_exceed}}
{{/stats}}
{{#changesets}}
--- Changeset #{{id}} ---
URL: http://openstreetmap.org/browse/changeset/{{id}}
User: http://openstreetmap.org/user/{{#details}}{{user}}{{/details}}
Comment: {{comment}}
{{#bldg_count}}Changed buildings ({{bldg_count}}): {{wids}}{{/bldg_count}}
{{#addr_count}}Changed addresses ({{addr_count}}): {{addr_chg_nids}} {{addr_chg_way}}{{/addr_count}}
{{/changesets}}
'''
|
|
#!/usr/bin/env python
#
# Created by: Pearu Peterson, March 2002
#
""" Test functions for linalg.basic module
"""
from __future__ import division, print_function, absolute_import
"""
Bugs:
1) solve.check_random_sym_complex fails if a is complex
and transpose(a) = conjugate(a) (a is Hermitian).
"""
__usage__ = """
Build linalg:
python setup_linalg.py build
Run tests if scipy is installed:
python -c 'import scipy;scipy.linalg.test()'
Run tests if linalg is not installed:
python tests/test_basic.py
"""
import numpy as np
from numpy import arange, array, dot, zeros, identity, conjugate, transpose, \
float32
import numpy.linalg as linalg
from numpy.testing import TestCase, rand, run_module_suite, assert_raises, \
assert_equal, assert_almost_equal, assert_array_almost_equal, assert_, \
assert_allclose
from scipy.linalg import solve, inv, det, lstsq, pinv, pinv2, pinvh, norm,\
solve_banded, solveh_banded, solve_triangular
from scipy.linalg._testutils import assert_no_overwrite
def random(size):
return rand(*size)
class TestSolveBanded(TestCase):
def test_real(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
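# Note: `ab` above uses the diagonal-ordered form expected by solve_banded,
# i.e. ab[u + i - j, j] == a[i, j]; with (l, u) = (2, 1) the first row holds
# the superdiagonal, the second the main diagonal and the remaining two rows
# the subdiagonals, with the unused corner entries simply ignored.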
def test_complex(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2j, 1, 20, 2j],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2j, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0,1j],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((l, u), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_real(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_tridiag_complex(self):
ab = array([[0.0, 20, 6, 2j],
[1, 4, 20, 14],
[-30, 1, 7, 0]])
a = np.diag(ab[0,1:], 1) + np.diag(ab[1,:], 0) + np.diag(ab[2,:-1], -1)
b4 = array([10.0, 0.0, 2.0, 14.0j])
b4by1 = b4.reshape(-1,1)
b4by2 = array([[2, 1],
[-30, 4],
[2, 3],
[1, 3]])
b4by4 = array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 1, 0, 0]])
for b in [b4, b4by1, b4by2, b4by4]:
x = solve_banded((1, 1), ab, b)
assert_array_almost_equal(dot(a, x), b)
def test_check_finite(self):
a = array([[1.0, 20, 0, 0],
[-30, 4, 6, 0],
[2, 1, 20, 2],
[0, -1, 7, 14]])
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
b4 = array([10.0, 0.0, 2.0, 14.0])
x = solve_banded((l, u), ab, b4, check_finite=False)
assert_array_almost_equal(dot(a, x), b4)
def test_bad_shape(self):
ab = array([[0.0, 20, 6, 2],
[1, 4, 20, 14],
[-30, 1, 7, 0],
[2, -1, 0, 0]])
l,u = 2,1
bad = array([1.0, 2.0, 3.0, 4.0]).reshape(-1,4)
assert_raises(ValueError, solve_banded, (l, u), ab, bad)
assert_raises(ValueError, solve_banded, (l, u), ab, [1.0, 2.0])
# Values of (l,u) are not compatible with ab.
assert_raises(ValueError, solve_banded, (1, 1), ab, [1.0, 2.0])
class TestSolveHBanded(TestCase):
def test_01_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 1D array.
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
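# Note: solveh_banded stores only one triangle of the symmetric matrix; in
# the upper form used here ab[u + i - j, j] holds a[i, j], so the last row of
# `ab` is the main diagonal, the rows above it are the superdiagonals, and
# the left-padding values (0.0 and -99) are placeholders that are never read.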
def test_02_upper(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_03_upper(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
# with the RHS as a 2D array with shape (4,1).
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0, 2.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0, 0.0]).reshape(-1,1))
def test_01_lower(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([1.0, 4.0, 1.0, 2.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_lower(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[4.0, 4.0, 4.0, 4.0],
[1.0, 1.0, 1.0, -99],
[2.0, 2.0, 0.0, 0.0]])
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_float32(self):
# Solve
# [ 4 1 2 0] [1]
# [ 1 4 1 2] X = [4]
# [ 2 1 4 1] [1]
# [ 0 2 1 4] [2]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0, 2.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0, 0.0])
def test_02_float32(self):
# Solve
# [ 4 1 2 0] [1 6]
# [ 1 4 1 2] X = [4 2]
# [ 2 1 4 1] [1 6]
# [ 0 2 1 4] [2 1]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, 1.0, 1.0, 1.0],
[4.0, 4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 6.0],
[4.0, 2.0],
[1.0, 6.0],
[2.0, 1.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_01_complex(self):
# Solve
# [ 4 -j 2 0] [2-j]
# [ j 4 -j 2] X = [4-j]
# [ 2 j 4 -j] [4+j]
# [ 0 2 j 4] [2+j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([2-1.0j, 4.0-1j, 4+1j, 2+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0, 0.0])
def test_02_complex(self):
# Solve
# [ 4 -j 2 0] [2-j 2+4j]
# [ j 4 -j 2] X = [4-j -1-j]
# [ 2 j 4 -j] [4+j 4+2j]
# [ 0 2 j 4] [2+j j]
#
ab = array([[0.0, 0.0, 2.0, 2.0],
[-99, -1.0j, -1.0j, -1.0j],
[4.0, 4.0, 4.0, 4.0]])
b = array([[2-1j, 2+4j],
[4.0-1j, -1-1j],
[4.0+1j, 4+2j],
[2+1j, 1j]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0],
[0.0, 0.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_upper(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_03_upper(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 2D array with shape (3,1).
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0]).reshape(-1,1)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, array([0.0, 1.0, 0.0]).reshape(-1,1))
def test_tridiag_01_lower(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, lower=True)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_lower(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[4.0, 4.0, 4.0],
[1.0, 1.0, -99]])
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]])
x = solveh_banded(ab, b, lower=True)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_float32(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
#
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]], dtype=float32)
b = array([1.0, 4.0, 1.0], dtype=float32)
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_tridiag_02_float32(self):
# Solve
# [ 4 1 0] [1 4]
# [ 1 4 1] X = [4 2]
# [ 0 1 4] [1 4]
#
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]], dtype=float32)
b = array([[1.0, 4.0],
[4.0, 2.0],
[1.0, 4.0]], dtype=float32)
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0],
[1.0, 0.0],
[0.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_tridiag_01_complex(self):
# Solve
# [ 4 -j 0] [ -j]
# [ j 4 -j] X = [4-j]
# [ 0 j 4] [4+j]
#
ab = array([[-99, -1.0j, -1.0j], [4.0, 4.0, 4.0]])
b = array([-1.0j, 4.0-1j, 4+1j])
x = solveh_banded(ab, b)
assert_array_almost_equal(x, [0.0, 1.0, 1.0])
def test_tridiag_02_complex(self):
# Solve
# [ 4 -j 0] [ -j 4j]
# [ j 4 -j] X = [4-j -1-j]
# [ 0 j 4] [4+j 4 ]
#
ab = array([[-99, -1.0j, -1.0j],
[4.0, 4.0, 4.0]])
b = array([[-1j, 4.0j],
[4.0-1j, -1.0-1j],
[4.0+1j, 4.0]])
x = solveh_banded(ab, b)
expected = array([[0.0, 1.0j],
[1.0, 0.0],
[1.0, 1.0]])
assert_array_almost_equal(x, expected)
def test_check_finite(self):
# Solve
# [ 4 1 0] [1]
# [ 1 4 1] X = [4]
# [ 0 1 4] [1]
# with the RHS as a 1D array.
ab = array([[-99, 1.0, 1.0], [4.0, 4.0, 4.0]])
b = array([1.0, 4.0, 1.0])
x = solveh_banded(ab, b, check_finite=False)
assert_array_almost_equal(x, [0.0, 1.0, 0.0])
def test_bad_shapes(self):
ab = array([[-99, 1.0, 1.0],
[4.0, 4.0, 4.0]])
b = array([[1.0, 4.0],
[4.0, 2.0]])
assert_raises(ValueError, solveh_banded, ab, b)
assert_raises(ValueError, solveh_banded, ab, [1.0, 2.0])
assert_raises(ValueError, solveh_banded, ab, [1.0])
class TestSolve(TestCase):
def setUp(self):
np.random.seed(1234)
def test_20Feb04_bug(self):
a = [[1,1],[1.0,0]] # ok
x0 = solve(a,[1,0j])
assert_array_almost_equal(dot(a,x0),[1,0])
a = [[1,1],[1.2,0]] # gives failure with clapack.zgesv(..,rowmajor=0)
b = [1,0j]
x0 = solve(a,b)
assert_array_almost_equal(dot(a,x0),[1,0])
def test_simple(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_simple_sym(self):
a = [[2,3],[3,5]]
for lower in [0,1]:
for b in ([[1,0],[0,1]],[1,0]):
x = solve(a,b,sym_pos=1,lower=lower)
assert_array_almost_equal(dot(a,x),b)
def test_simple_sym_complex(self):
a = [[5,2],[2,4]]
for b in [[1j,0],
[[1j,1j],
[0,2]],
]:
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_simple_complex(self):
a = array([[5,2],[2j,4]],'D')
for b in [[1j,0],
[[1j,1j],
[0,2]],
[1,0j],
array([1,0],'D'),
]:
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_nils_20Feb04(self):
n = 2
A = random([n,n])+random([n,n])*1j
X = zeros((n,n),'D')
Ainv = inv(A)
R = identity(n)+identity(n)*0j
for i in arange(0,n):
r = R[:,i]
X[:,i] = solve(A,r)
assert_array_almost_equal(X,Ainv)
def test_random(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_random_complex(self):
n = 20
a = random([n,n]) + 1j * random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x = solve(a,b)
assert_array_almost_equal(dot(a,x),b)
def test_random_sym(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = abs(20*(.1+a[i,i]))
for j in range(i):
a[i,j] = a[j,i]
for i in range(4):
b = random([n])
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_random_sym_complex(self):
n = 20
a = random([n,n])
# a = a + 1j*random([n,n]) # XXX: with this the accuracy will be very low
for i in range(n):
a[i,i] = abs(20*(.1+a[i,i]))
for j in range(i):
a[i,j] = conjugate(a[j,i])
b = random([n])+2j*random([n])
for i in range(2):
x = solve(a,b,sym_pos=1)
assert_array_almost_equal(dot(a,x),b)
def test_check_finite(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = solve(a,b, check_finite=False)
assert_array_almost_equal(dot(a,x),b)
class TestSolveTriangular(TestCase):
def test_simple(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1,0], [1,2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True)
assert_array_almost_equal(sol, [1, 0])
# check that it works also for non-contiguous matrices
sol = solve_triangular(A.T, b, lower=False)
assert_array_almost_equal(sol, [.5, .5])
# and that it gives the same result as trans=1
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [.5, .5])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[1., -.5], [0, 0.5]])
def test_simple_complex(self):
"""
solve_triangular on a simple 2x2 complex matrix
"""
A = array([[1+1j, 0], [1j, 2]])
b = identity(2)
sol = solve_triangular(A, b, lower=True, trans=1)
assert_array_almost_equal(sol, [[.5-.5j, -.25-.25j], [0, 0.5]])
def test_check_finite(self):
"""
solve_triangular on a simple 2x2 matrix.
"""
A = array([[1,0], [1,2]])
b = [1, 1]
sol = solve_triangular(A, b, lower=True, check_finite=False)
assert_array_almost_equal(sol, [1, 0])
class TestInv(TestCase):
def setUp(self):
np.random.seed(1234)
def test_simple(self):
a = [[1,2],[3,4]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
a = [[1,2,3],[4,5,6],[7,8,10]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0,0],[0,1,0],[0,0,1]])
def test_random(self):
n = 20
for i in range(4):
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
identity(n))
def test_simple_complex(self):
a = [[1,2],[3,4j]]
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
def test_random_complex(self):
n = 20
for i in range(4):
a = random([n,n])+2j*random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
a_inv = inv(a)
assert_array_almost_equal(dot(a,a_inv),
identity(n))
def test_check_finite(self):
a = [[1,2],[3,4]]
a_inv = inv(a, check_finite=False)
assert_array_almost_equal(dot(a,a_inv),
[[1,0],[0,1]])
class TestDet(TestCase):
def setUp(self):
np.random.seed(1234)
def test_simple(self):
a = [[1,2],[3,4]]
a_det = det(a)
assert_almost_equal(a_det,-2.0)
def test_simple_complex(self):
a = [[1,2],[3,4j]]
a_det = det(a)
assert_almost_equal(a_det,-6+4j)
def test_random(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n,n])
d1 = det(a)
d2 = basic_det(a)
assert_almost_equal(d1,d2)
def test_random_complex(self):
basic_det = linalg.det
n = 20
for i in range(4):
a = random([n,n]) + 2j*random([n,n])
d1 = det(a)
d2 = basic_det(a)
assert_allclose(d1, d2, rtol=1e-13)
def test_check_finite(self):
a = [[1,2],[3,4]]
a_det = det(a, check_finite=False)
assert_almost_equal(a_det,-2.0)
def direct_lstsq(a,b,cmplx=0):
at = transpose(a)
if cmplx:
at = conjugate(at)
a1 = dot(at, a)
b1 = dot(at, b)
return solve(a1, b1)
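# Note: direct_lstsq solves the normal equations A^H A x = A^H b, which
# coincides with the least-squares solution computed by lstsq (via an
# SVD-based LAPACK driver) whenever A has full column rank; it serves only as
# an independent reference for the tests below.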
class TestLstsq(TestCase):
def setUp(self):
np.random.seed(1234)
def test_random_overdet_large(self):
# bug report: Nils Wagner
n = 200
a = random([n,2])
for i in range(2):
a[i,i] = 20*(.1+a[i,i])
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(x,direct_lstsq(a,b))
def test_simple_exact(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_simple_overdet(self):
a = [[1,2],[4,5],[3,4]]
b = [1,2,3]
x,res,r,s = lstsq(a,b)
assert_array_almost_equal(x,direct_lstsq(a,b))
assert_almost_equal((abs(dot(a,x) - b)**2).sum(axis=0), res)
def test_simple_overdet_complex(self):
a = [[1+2j,2],[4,5],[3,4]]
b = [1,2+4j,3]
x,res,r,s = lstsq(a,b)
assert_array_almost_equal(x,direct_lstsq(a,b,cmplx=1))
assert_almost_equal(res, (abs(dot(a,x) - b)**2).sum(axis=0))
def test_simple_underdet(self):
a = [[1,2,3],[4,5,6]]
b = [1,2]
x,res,r,s = lstsq(a,b)
# XXX: need independent check
assert_array_almost_equal(x,[-0.05555556, 0.11111111, 0.27777778])
def test_random_exact(self):
n = 20
a = random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_random_complex_exact(self):
n = 20
a = random([n,n]) + 1j * random([n,n])
for i in range(n):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x = lstsq(a,b)[0]
assert_array_almost_equal(dot(a,x),b)
def test_random_overdet(self):
n = 20
m = 15
a = random([n,m])
for i in range(m):
a[i,i] = 20*(.1+a[i,i])
for i in range(4):
b = random([n,3])
x,res,r,s = lstsq(a,b)
assert_(r == m, 'unexpected efficient rank')
# XXX: check definition of res
assert_array_almost_equal(x,direct_lstsq(a,b))
def test_random_complex_overdet(self):
n = 20
m = 15
a = random([n,m]) + 1j * random([n,m])
for i in range(m):
a[i,i] = 20*(.1+a[i,i])
for i in range(2):
b = random([n,3])
x,res,r,s = lstsq(a,b)
assert_(r == m, 'unexpected efficient rank')
# XXX: check definition of res
assert_array_almost_equal(x,direct_lstsq(a,b,1))
def test_check_finite(self):
a = [[1,20],[-30,4]]
for b in ([[1,0],[0,1]],[1,0],
[[2,1],[-30,4]]):
x = lstsq(a,b, check_finite=False)[0]
assert_array_almost_equal(dot(a,x),b)
class TestPinv(TestCase):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a_pinv = pinv(a)
assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a,a_pinv), np.eye(3))
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+ 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
a_pinv = pinv(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
a_pinv = pinv2(a)
assert_array_almost_equal(dot(a, a_pinv), np.eye(3))
def test_simple_singular(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_simple_cols(self):
a = array([[1, 2, 3], [4, 5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_simple_rows(self):
a = array([[1, 2], [3, 4], [5, 6]], dtype=float)
a_pinv = pinv(a)
a_pinv2 = pinv2(a)
assert_array_almost_equal(a_pinv,a_pinv2)
def test_check_finite(self):
a = array([[1,2,3],[4,5,6.],[7,8,10]])
a_pinv = pinv(a, check_finite=False)
assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
a_pinv = pinv2(a, check_finite=False)
assert_array_almost_equal(dot(a,a_pinv),[[1,0,0],[0,1,0],[0,0,1]])
class TestPinvSymmetric(TestCase):
def test_simple_real(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
a = np.dot(a, a.T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
def test_nonpositive(self):
a = array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=float)
a = np.dot(a, a.T)
u, s, vt = np.linalg.svd(a)
s[0] *= -1
a = np.dot(u * s, vt) # a is now symmetric non-positive and singular
a_pinv = pinv2(a)
a_pinvh = pinvh(a)
assert_array_almost_equal(a_pinv, a_pinvh)
def test_simple_complex(self):
a = (array([[1, 2, 3], [4, 5, 6], [7, 8, 10]], dtype=float)
+ 1j * array([[10, 8, 7], [6, 5, 4], [3, 2, 1]], dtype=float))
a = np.dot(a, a.conj().T)
a_pinv = pinvh(a)
assert_array_almost_equal(np.dot(a, a_pinv), np.eye(3))
class TestNorm(object):
def test_types(self):
for dtype in np.typecodes['AllFloat']:
x = np.array([1,2,3], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
for dtype in np.typecodes['Complex']:
x = np.array([1j,2j,3j], dtype=dtype)
tol = max(1e-15, np.finfo(dtype).eps.real * 20)
assert_allclose(norm(x), np.sqrt(14), rtol=tol)
assert_allclose(norm(x, 2), np.sqrt(14), rtol=tol)
def test_overflow(self):
# unlike numpy's norm, this one is
# safer on overflow
a = array([1e20], dtype=float32)
assert_almost_equal(norm(a), a)
def test_stable(self):
# more stable than numpy's norm
a = array([1e4] + [1]*10000, dtype=float32)
try:
# snrm in double precision; we obtain the same as for float64
# -- large atol needed due to varying blas implementations
assert_allclose(norm(a) - 1e4, 0.5, atol=1e-2)
except AssertionError:
# snrm implemented in single precision, == np.linalg.norm result
msg = ": Result should equal either 0.0 or 0.5 (depending on " \
"implementation of snrm2)."
assert_almost_equal(norm(a) - 1e4, 0.0, err_msg=msg)
def test_zero_norm(self):
assert_equal(norm([1,0,3], 0), 2)
assert_equal(norm([1,2,3], 0), 3)
class TestOverwrite(object):
def test_solve(self):
assert_no_overwrite(solve, [(3,3), (3,)])
def test_solve_triangular(self):
assert_no_overwrite(solve_triangular, [(3,3), (3,)])
def test_solve_banded(self):
assert_no_overwrite(lambda ab, b: solve_banded((2,1), ab, b),
[(4,6), (6,)])
def test_solveh_banded(self):
assert_no_overwrite(solveh_banded, [(2,6), (6,)])
def test_inv(self):
assert_no_overwrite(inv, [(3,3)])
def test_det(self):
assert_no_overwrite(det, [(3,3)])
def test_lstsq(self):
assert_no_overwrite(lstsq, [(3,2), (3,)])
def test_pinv(self):
assert_no_overwrite(pinv, [(3,3)])
def test_pinv2(self):
assert_no_overwrite(pinv2, [(3,3)])
def test_pinvh(self):
assert_no_overwrite(pinvh, [(3,3)])
if __name__ == "__main__":
run_module_suite()
|
|
from Object import Object
from consts import *
import HeeksCNC
class Tool(Object):
def __init__(self, diameter = 3.0, title = None, tool_number = 0, type = TOOL_TYPE_SLOTCUTTER):
Object.__init__(self)
self.tool_number = tool_number
self.type = type
self.diameter = diameter
self.material = TOOL_MATERIAL_UNDEFINED
self.tool_length_offset = 0.0
self.x_offset = 0.0
self.front_angle = 0.0
self.tool_angle = 0.0
self.back_angle = 0.0
self.orientation = 0
'''
// also m_corner_radius, see below, is used for turning tools and milling tools
/**
The next three parameters describe the cutting surfaces of the bit.
The two radii go from the centre of the bit -> flat radius -> corner radius.
The vertical_cutting_edge_angle is the angle between the centre line of the
milling bit and the angle of the outside cutting edges. For an end-mill, this
would be zero. i.e. the cutting edges are parallel to the centre line
of the milling bit. For a chamfering bit, it may be something like 45 degrees.
i.e. 45 degrees from the centre line which has both cutting edges at 2 * 45 = 90
degrees to each other
For a ball-nose milling bit we would have
- m_corner_radius = m_diameter / 2
- m_flat_radius = 0 // No middle bit at the bottom of the cutter that remains flat
// before the corner radius starts.
- m_vertical_cutting_edge_angle = 0
For an end-mill we would have
- m_corner_radius = 0
- m_flat_radius = m_diameter / 2
- m_vertical_cutting_edge_angle = 0
For a chamfering bit we would have
- m_corner_radius = 0
- m_flat_radius = 0 // sharp pointed end. This may be larger if we can't use the centre point.
- m_vertical_cutting_edge_angle = 45 // degrees from centre line of tool
*/
'''
self.corner_radius = 0.0
self.flat_radius = 0.0
self.cutting_edge_angle = 0.0
self.cutting_edge_height = 0.0 # How far, from the bottom of the cutter, do the flutes extend?
self.max_advance_per_revolution = 0.0
''' // This is the maximum distance a tool should advance during a single
// revolution. This value is often defined by the manufacturer in
// terms of an advance on a per-tooth basis. This value, however,
// must be expressed on a per-revolution basis, i.e. we don't want
// to have to track the number of cutting teeth, so a per-revolution
// value is easier to use.
'''
self.automatically_generate_title = True # Set to true by default but reset to false when the user edits the title.
'''
// The following coordinates relate ONLY to touch probe tools. They describe
// the error the probe tool has in locating an X,Y point. These values are
// added to a probed point's location to find the actual point. The values
// should come from calibrating the touch probe. i.e. set machine position
// to (0,0,0), drill a hole and then probe for the centre of the hole. The
// coordinates found by the centre finding operation should be entered into
// these values verbatim. These will represent how far off concentric the
// touch probe's tip is with respect to the quill. Of course, these only
// make sense if the probe's body is aligned consistently each time. I will
// ASSUME this is correct.
'''
self.probe_offset_x = 0.0
self.probe_offset_y = 0.0
'''
// The following properties relate to the extrusions created by a reprap style 3D printer.
// using temperature, speed, and the height of the nozzle, and the nozzle size it's possible to create
// many different sizes and shapes of extrusion.
typedef std::pair< eExtrusionMaterial_t, wxString > ExtrusionMaterialDescription_t
typedef std::vector<ExtrusionMaterialDescription_t > ExtrusionMaterialsList_t
static ExtrusionMaterialsList_t GetExtrusionMaterialsList()
{
ExtrusionMaterialsList_t ExtrusionMaterials_list
ExtrusionMaterials_list.push_back( ExtrusionMaterialDescription_t( eABS, wxString(_("ABS Plastic")) ))
ExtrusionMaterials_list.push_back( ExtrusionMaterialDescription_t( ePLA, wxString(_("PLA Plastic")) ))
ExtrusionMaterials_list.push_back( ExtrusionMaterialDescription_t( eHDPE, wxString(_("HDPE Plastic")) ))
return(ExtrusionMaterials_list)
}
'''
self.extrusion_material = EXTRUSION_MATERIAL_ABS
self.feedrate = 0.0
self.layer_height = 0.1
self.width_over_thickness = 1.0
self.temperature = 200
self.flowrate = 10
self.filament_diameter = 0.2
'''
// The gradient is the steepest angle at which this tool can plunge into the material. Many
// tools behave better if they are slowly ramped down into the material. This gradient
// specifies the steepest angle of descent. This is expected to be a negative number indicating
// the 'rise / run' ratio. Since the 'rise' will be downward, it will be negative.
// By this measurement, a drill bit's straight plunge would have an infinite gradient (all rise, no run).
// To cater for this, a value of zero will indicate a straight plunge.
'''
self.gradient = 0.0
'''
// properties for tapping tools
int m_direction // 0.. right hand tapping, 1..left hand tapping
double m_pitch // in units/rev
'''
if title is not None:
self.title = title
else:
self.title = self.GenerateMeaningfulName()
self.ResetParametersToReasonableValues()
def TypeName(self):
return "Tool"
def name(self):
return self.title
def icon(self):
# the name of the PNG file in the HeeksCNC icons folder
return "tool"
def ResetParametersToReasonableValues(self):
if self.type != TOOL_TYPE_TURNINGTOOL:
self.tool_length_offset = (5 * self.diameter)
self.gradient = self.ReasonableGradient(self.type)
if self.type == TOOL_TYPE_DRILL:
self.corner_radius = 0.0
self.flat_radius = 0.0
self.cutting_edge_angle = 59.0
self.cutting_edge_height = self.diameter * 3.0
self.ResetTitle()
elif self.type == TOOL_TYPE_CENTREDRILL:
self.corner_radius = 0.0
self.flat_radius = 0.0
self.cutting_edge_angle = 59.0
self.cutting_edge_height = self.diameter * 1.0
self.ResetTitle()
elif self.type == TOOL_TYPE_ENDMILL:
self.corner_radius = 0.0
self.flat_radius = self.diameter / 2
self.cutting_edge_angle = 0.0
self.cutting_edge_height = self.diameter * 3.0
self.ResetTitle()
elif self.type == TOOL_TYPE_SLOTCUTTER:
self.corner_radius = 0.0
self.flat_radius = self.diameter / 2
self.cutting_edge_angle = 0.0
self.cutting_edge_height = self.diameter * 3.0
self.ResetTitle()
elif self.type == TOOL_TYPE_BALLENDMILL:
self.corner_radius = (self.diameter / 2)
self.flat_radius = 0.0
self.cutting_edge_angle = 0.0
self.cutting_edge_height = self.diameter * 3.0
self.ResetTitle()
'''
case CToolParams::eTouchProbe:
self.corner_radius = (self.diameter / 2)
self.flat_radius = 0
ResetTitle()
break
case CToolParams::eExtrusion:
self.corner_radius = (self.diameter / 2)
self.flat_radius = 0
ResetTitle()
break
case CToolParams::eToolLengthSwitch:
self.corner_radius = (self.diameter / 2)
ResetTitle()
break
case CToolParams::eChamfer:
self.corner_radius = 0
self.flat_radius = 0
self.cutting_edge_angle = 45
height = (self.diameter / 2.0) * tan( degrees_to_radians(90.0 - self.cutting_edge_angle))
self.cutting_edge_height = height
ResetTitle()
break
case CToolParams::eTurningTool:
// No special constraints for this.
ResetTitle()
break
case CToolParams::eTapTool:
self.tool_length_offset = (5 * self.diameter)
self.automatically_generate_title = 1
self.diameter = 6.0
self.direction = 0
self.pitch = 1.0
self.cutting_edge_height = self.diameter * 3.0
ResetTitle()
break
default:
wxMessageBox(_T("That is not a valid tool type. Aborting value change."))
return
'''
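    # Hedged sketch, not part of the original HeeksCNC Tool API: the chamfer case
    # in the commented-out C++ above derives the cutting edge height from the
    # included angle. A direct Python port of that single formula would be:
    def ChamferCuttingEdgeHeight(self):
        import math  # not imported at module level in this file
        # height = (diameter / 2) * tan(90 degrees - cutting_edge_angle)
        return (self.diameter / 2.0) * math.tan(math.radians(90.0 - self.cutting_edge_angle))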
def ReasonableGradient(self, type):
if type in (TOOL_TYPE_SLOTCUTTER, TOOL_TYPE_ENDMILL, TOOL_TYPE_BALLENDMILL):
return -0.1
return 0.0
def GenerateMeaningfulName(self):
name_str = ""
if self.type != TOOL_TYPE_TURNINGTOOL and self.type != TOOL_TYPE_TOUCHPROBE and self.type != TOOL_TYPE_TOOLLENGTHSWITCH:
if HeeksCNC.program.units == 1.0:
# We're using metric. Leave the diameter as a floating point number. It just looks more natural.
name_str = name_str + str(self.diameter) + " mm "
else:
# We're using inches.
# to do, Find a fractional representation if one matches.
name_str = name_str + str(self.diameter/HeeksCNC.program.units) + " inch "
if self.type != TOOL_TYPE_EXTRUSION and self.type != TOOL_TYPE_TOUCHPROBE and self.type != TOOL_TYPE_TOOLLENGTHSWITCH:
if self.material == TOOL_MATERIAL_HSS:
name_str = name_str + "HSS "
elif self.material == TOOL_MATERIAL_CARBIDE:
name_str = name_str + "Carbide "
if self.type == TOOL_TYPE_EXTRUSION:
if self.extrusion_material == EXTRUSION_MATERIAL_ABS:
name_str = name_str + "ABS "
elif self.extrusion_material == EXTRUSION_MATERIAL_PLA:
name_str = name_str + "PLA "
elif self.extrusion_material == EXTRUSION_MATERIAL_HDPE:
name_str = name_str + "HDPE "
if self.type == TOOL_TYPE_DRILL:
name_str = name_str + "Drill Bit"
elif self.type == TOOL_TYPE_CENTREDRILL:
name_str = name_str + "Centre Drill Bit"
elif self.type == TOOL_TYPE_ENDMILL:
name_str = name_str + "End Mill"
elif self.type == TOOL_TYPE_SLOTCUTTER:
name_str = name_str + "Slot Cutter"
elif self.type == TOOL_TYPE_BALLENDMILL:
name_str = name_str + "Ball End Mill"
elif self.type == TOOL_TYPE_CHAMFER:
# Remove all that we've already prepared.
name_str = str(self.cutting_edge_angle) + " degree " + "Chamfering Bit"
elif self.type == TOOL_TYPE_TURNINGTOOL:
name_str = name_str + "Turning Tool"
elif self.type == TOOL_TYPE_TOUCHPROBE:
name_str = name_str + "Touch Probe"
elif self.type == TOOL_TYPE_EXTRUSION:
name_str = name_str + "Extrusion"
elif self.type == TOOL_TYPE_TOOLLENGTHSWITCH:
name_str = name_str + "Tool Length Switch"
elif self.type == TOOL_TYPE_TAPTOOL:
# to do, copy code from CTool.cpp
name_str = name_str + "Tap Tool"
return name_str
def ResetTitle(self):
if self.automatically_generate_title:
self.title = self.GenerateMeaningfulName()
def AppendTextToProgram(self):
# The G10 command can be used (within EMC2) to add a tool to the tool
# table from within a program.
# G10 L1 P[tool number] R[radius] X[offset] Z[offset] Q[orientation]
#
# The radius value must be expressed in MACHINE CONFIGURATION UNITS. This may be different
# to this model's drawing units. The value is interpreted, at least for EMC2, in terms
# of the units set up for the machine's configuration (something.ini in EMC2 parlance). At
# the moment we don't have a MACHINE CONFIGURATION UNITS parameter so we've got a 50%
# chance of getting it right.
if len(self.title) > 0:
HeeksCNC.program.python_program += "#('" + self.title + "')\n"
HeeksCNC.program.python_program += "tool_defn( id=" + str(self.tool_number) + ", "
if len(self.title) > 0:
HeeksCNC.program.python_program += "name='" + self.title + "', "
else:
HeeksCNC.program.python_program += "name=None, "
if self.diameter > 0.0:
HeeksCNC.program.python_program += "radius=" + str(self.diameter / 2 / HeeksCNC.program.units) + ", "
else:
HeeksCNC.program.python_program += "radius=None, "
if self.tool_length_offset > 0.0:
HeeksCNC.program.python_program += "length=" + str(self.tool_length_offset / HeeksCNC.program.units) + ", "
else:
HeeksCNC.program.python_program += "length=None, "
HeeksCNC.program.python_program += "gradient=" + str(self.gradient)
HeeksCNC.program.python_program += ")\n"
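# Usage sketch (hypothetical values, not from the original file): assuming a
# metric program (HeeksCNC.program.units == 1.0), a 6 mm slot cutter defined as
#
#   tool = Tool(diameter=6.0, tool_number=2, type=TOOL_TYPE_SLOTCUTTER)
#   tool.AppendTextToProgram()
#
# would append roughly the following to HeeksCNC.program.python_program:
#
#   #('6.0 mm Slot Cutter')
#   tool_defn( id=2, name='6.0 mm Slot Cutter', radius=3.0, length=30.0, gradient=-0.1)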
|
|
# -*- coding: utf-8 -*-
"""AWS DynamoDB result store backend."""
from __future__ import absolute_import, unicode_literals
from collections import namedtuple
from time import sleep, time
from kombu.utils.url import _parse_url as parse_url
from celery.exceptions import ImproperlyConfigured
from celery.five import string
from celery.utils.log import get_logger
from .base import KeyValueStoreBackend
try:
import boto3
from botocore.exceptions import ClientError
except ImportError: # pragma: no cover
boto3 = ClientError = None # noqa
__all__ = ('DynamoDBBackend',)
# Helper class that describes a DynamoDB attribute
DynamoDBAttribute = namedtuple('DynamoDBAttribute', ('name', 'data_type'))
logger = get_logger(__name__)
class DynamoDBBackend(KeyValueStoreBackend):
"""AWS DynamoDB result backend.
Raises:
celery.exceptions.ImproperlyConfigured:
if module :pypi:`boto3` is not available.
"""
#: default DynamoDB table name (`default`)
table_name = 'celery'
#: Read Provisioned Throughput (`default`)
read_capacity_units = 1
#: Write Provisioned Throughput (`default`)
write_capacity_units = 1
#: AWS region (`default`)
aws_region = None
#: The endpoint URL that is passed to boto3 (local DynamoDB) (`default`)
endpoint_url = None
#: Item time-to-live in seconds (`default`)
time_to_live_seconds = None
# DynamoDB supports Time to Live as an auto-expiry mechanism.
supports_autoexpire = True
_key_field = DynamoDBAttribute(name='id', data_type='S')
_value_field = DynamoDBAttribute(name='result', data_type='B')
_timestamp_field = DynamoDBAttribute(name='timestamp', data_type='N')
_ttl_field = DynamoDBAttribute(name='ttl', data_type='N')
_available_fields = None
def __init__(self, url=None, table_name=None, *args, **kwargs):
super(DynamoDBBackend, self).__init__(*args, **kwargs)
self.url = url
self.table_name = table_name or self.table_name
if not boto3:
raise ImproperlyConfigured(
'You need to install the boto3 library to use the '
'DynamoDB backend.')
aws_credentials_given = False
aws_access_key_id = None
aws_secret_access_key = None
if url is not None:
scheme, region, port, username, password, table, query = \
parse_url(url)
aws_access_key_id = username
aws_secret_access_key = password
access_key_given = aws_access_key_id is not None
secret_key_given = aws_secret_access_key is not None
if access_key_given != secret_key_given:
raise ImproperlyConfigured(
'You need to specify both the Access Key ID '
'and Secret.')
aws_credentials_given = access_key_given
if region == 'localhost':
# We are using the downloadable, local version of DynamoDB
self.endpoint_url = 'http://localhost:{}'.format(port)
self.aws_region = 'us-east-1'
logger.warning(
'Using local-only DynamoDB endpoint URL: {}'.format(
self.endpoint_url
)
)
else:
self.aws_region = region
# If endpoint_url is explicitly set use it instead
_get = self.app.conf.get
config_endpoint_url = _get('dynamodb_endpoint_url')
if config_endpoint_url:
self.endpoint_url = config_endpoint_url
self.read_capacity_units = int(
query.get(
'read',
self.read_capacity_units
)
)
self.write_capacity_units = int(
query.get(
'write',
self.write_capacity_units
)
)
ttl = query.get('ttl_seconds', self.time_to_live_seconds)
if ttl:
try:
self.time_to_live_seconds = int(ttl)
except ValueError as e:
logger.error(
'TTL must be a number; got "%s"', ttl,
exc_info=e
)
raise e
self.table_name = table or self.table_name
self._available_fields = (
self._key_field,
self._value_field,
self._timestamp_field
)
self._client = None
if aws_credentials_given:
self._get_client(
access_key_id=aws_access_key_id,
secret_access_key=aws_secret_access_key
)
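    # Configuration sketch (hypothetical values): the URL parsing above accepts
    # result backend URLs of roughly these forms:
    #
    #   dynamodb://access_key_id:secret_access_key@us-east-1/celery_results?read=5&write=5
    #   dynamodb://@localhost:8000/celery_results          (local downloadable DynamoDB)
    #
    # with an optional ttl_seconds=<n> query parameter to enable item expiry.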
def _get_client(self, access_key_id=None, secret_access_key=None):
"""Get client connection."""
if self._client is None:
client_parameters = {
'region_name': self.aws_region
}
if access_key_id is not None:
client_parameters.update({
'aws_access_key_id': access_key_id,
'aws_secret_access_key': secret_access_key
})
if self.endpoint_url is not None:
client_parameters['endpoint_url'] = self.endpoint_url
self._client = boto3.client(
'dynamodb',
**client_parameters
)
self._get_or_create_table()
if self._has_ttl() is not None:
self._validate_ttl_methods()
self._set_table_ttl()
return self._client
def _get_table_schema(self):
"""Get the boto3 structure describing the DynamoDB table schema."""
return {
'AttributeDefinitions': [
{
'AttributeName': self._key_field.name,
'AttributeType': self._key_field.data_type
}
],
'TableName': self.table_name,
'KeySchema': [
{
'AttributeName': self._key_field.name,
'KeyType': 'HASH'
}
],
'ProvisionedThroughput': {
'ReadCapacityUnits': self.read_capacity_units,
'WriteCapacityUnits': self.write_capacity_units
}
}
def _get_or_create_table(self):
"""Create table if not exists, otherwise return the description."""
table_schema = self._get_table_schema()
try:
table_description = self._client.create_table(**table_schema)
logger.info(
'DynamoDB Table {} did not exist, creating.'.format(
self.table_name
)
)
# In case we created the table, wait until it becomes available.
self._wait_for_table_status('ACTIVE')
logger.info(
'DynamoDB Table {} is now available.'.format(
self.table_name
)
)
return table_description
except ClientError as e:
error_code = e.response['Error'].get('Code', 'Unknown')
# If table exists, do not fail, just return the description.
if error_code == 'ResourceInUseException':
return self._client.describe_table(
TableName=self.table_name
)
else:
raise e
def _has_ttl(self):
"""Return the desired Time to Live config.
- True: Enable TTL on the table; use expiry.
- False: Disable TTL on the table; don't use expiry.
- None: Ignore TTL on the table; don't use expiry.
"""
return None if self.time_to_live_seconds is None \
else self.time_to_live_seconds >= 0
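    # Mapping sketch for the expression above: time_to_live_seconds=None -> None
    # (leave table TTL untouched), >= 0 -> True (enable TTL and use expiry),
    # < 0 -> False (explicitly disable TTL on the table).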
def _validate_ttl_methods(self):
"""Verify boto support for the DynamoDB Time to Live methods."""
# Required TTL methods.
required_methods = (
'update_time_to_live',
'describe_time_to_live',
)
# Find missing methods.
missing_methods = []
for method in list(required_methods):
if not hasattr(self._client, method):
missing_methods.append(method)
if missing_methods:
logger.error(
(
'boto3 method(s) {methods} not found; ensure that '
'boto3>=1.9.178 and botocore>=1.12.178 are installed'
).format(
methods=','.join(missing_methods)
)
)
raise AttributeError(
'boto3 method(s) {methods} not found'.format(
methods=','.join(missing_methods)
)
)
def _get_ttl_specification(self, ttl_attr_name):
"""Get the boto3 structure describing the DynamoDB TTL specification."""
return {
'TableName': self.table_name,
'TimeToLiveSpecification': {
'Enabled': self._has_ttl(),
'AttributeName': ttl_attr_name
}
}
def _get_table_ttl_description(self):
# Get the current TTL description.
try:
description = self._client.describe_time_to_live(
TableName=self.table_name
)
except ClientError as e:
error_code = e.response['Error'].get('Code', 'Unknown')
error_message = e.response['Error'].get('Message', 'Unknown')
logger.error((
'Error describing Time to Live on DynamoDB table {table}: '
'{code}: {message}'
).format(
table=self.table_name,
code=error_code,
message=error_message,
))
raise e
return description
def _set_table_ttl(self):
"""Enable or disable Time to Live on the table."""
# Get the table TTL description, and return early when possible.
description = self._get_table_ttl_description()
status = description['TimeToLiveDescription']['TimeToLiveStatus']
if status in ('ENABLED', 'ENABLING'):
cur_attr_name = \
description['TimeToLiveDescription']['AttributeName']
if self._has_ttl():
if cur_attr_name == self._ttl_field.name:
# We want TTL enabled, and it is currently enabled or being
# enabled, and on the correct attribute.
logger.debug((
'DynamoDB Time to Live is {situation} '
'on table {table}'
).format(
situation='already enabled'
if status == 'ENABLED'
else 'currently being enabled',
table=self.table_name
))
return description
elif status in ('DISABLED', 'DISABLING'):
if not self._has_ttl():
# We want TTL disabled, and it is currently disabled or being
# disabled.
logger.debug((
'DynamoDB Time to Live is {situation} '
'on table {table}'
).format(
situation='already disabled'
if status == 'DISABLED'
else 'currently being disabled',
table=self.table_name
))
return description
# The state shouldn't ever have any value beyond the four handled
# above, but to ease troubleshooting of potential future changes, emit
# a log showing the unknown state.
else: # pragma: no cover
logger.warning((
'Unknown DynamoDB Time to Live status {status} '
'on table {table}. Attempting to continue.'
).format(
status=status,
table=self.table_name
))
# At this point, we have one of the following situations:
#
# We want TTL enabled,
#
# - and it's currently disabled: Try to enable.
#
# - and it's being disabled: Try to enable, but this is almost sure to
# raise ValidationException with message:
#
# Time to live has been modified multiple times within a fixed
# interval
#
# - and it's currently enabling or being enabled, but on the wrong
# attribute: Try to enable, but this will raise ValidationException
# with message:
#
# TimeToLive is active on a different AttributeName: current
# AttributeName is ttlx
#
# We want TTL disabled,
#
# - and it's currently enabled: Try to disable.
#
# - and it's being enabled: Try to disable, but this is almost sure to
# raise ValidationException with message:
#
# Time to live has been modified multiple times within a fixed
# interval
#
attr_name = \
cur_attr_name if status == 'ENABLED' else self._ttl_field.name
try:
specification = self._client.update_time_to_live(
**self._get_ttl_specification(
ttl_attr_name=attr_name
)
)
logger.info(
(
'DynamoDB table Time to Live updated: '
'table={table} enabled={enabled} attribute={attr}'
).format(
table=self.table_name,
enabled=self._has_ttl(),
attr=self._ttl_field.name
)
)
return specification
except ClientError as e:
error_code = e.response['Error'].get('Code', 'Unknown')
error_message = e.response['Error'].get('Message', 'Unknown')
logger.error((
'Error {action} Time to Live on DynamoDB table {table}: '
'{code}: {message}'
).format(
action='enabling' if self._has_ttl() else 'disabling',
table=self.table_name,
code=error_code,
message=error_message,
))
raise e
def _wait_for_table_status(self, expected='ACTIVE'):
"""Poll for the expected table status."""
achieved_state = False
while not achieved_state:
table_description = self.client.describe_table(
TableName=self.table_name
)
logger.debug(
'Waiting for DynamoDB table {} to become {}.'.format(
self.table_name,
expected
)
)
current_status = table_description['Table']['TableStatus']
achieved_state = current_status == expected
sleep(1)
def _prepare_get_request(self, key):
"""Construct the item retrieval request parameters."""
return {
'TableName': self.table_name,
'Key': {
self._key_field.name: {
self._key_field.data_type: key
}
}
}
def _prepare_put_request(self, key, value):
"""Construct the item creation request parameters."""
timestamp = time()
put_request = {
'TableName': self.table_name,
'Item': {
self._key_field.name: {
self._key_field.data_type: key
},
self._value_field.name: {
self._value_field.data_type: value
},
self._timestamp_field.name: {
self._timestamp_field.data_type: str(timestamp)
}
}
}
if self._has_ttl():
put_request['Item'].update({
self._ttl_field.name: {
self._ttl_field.data_type:
str(int(timestamp + self.time_to_live_seconds))
}
})
return put_request
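    # Shape sketch (hypothetical values): for key 'celery-task-meta-abc' the
    # request built above looks like
    #
    #   {'TableName': 'celery',
    #    'Item': {'id': {'S': 'celery-task-meta-abc'},
    #             'result': {'B': <serialized result>},
    #             'timestamp': {'N': '1514764800.0'},
    #             'ttl': {'N': '1514768400'}}}    # 'ttl' only when TTL is enabled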
def _item_to_dict(self, raw_response):
"""Convert get_item() response to field-value pairs."""
if 'Item' not in raw_response:
return {}
return {
field.name: raw_response['Item'][field.name][field.data_type]
for field in self._available_fields
}
@property
def client(self):
return self._get_client()
def get(self, key):
key = string(key)
request_parameters = self._prepare_get_request(key)
item_response = self.client.get_item(**request_parameters)
item = self._item_to_dict(item_response)
return item.get(self._value_field.name)
def set(self, key, value):
key = string(key)
request_parameters = self._prepare_put_request(key, value)
self.client.put_item(**request_parameters)
def mget(self, keys):
return [self.get(key) for key in keys]
def delete(self, key):
key = string(key)
request_parameters = self._prepare_get_request(key)
self.client.delete_item(**request_parameters)
|
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
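# _meta_table maps fully qualified class names from the generated
# openconfig-telemetry bindings to their _MetaInfoClass descriptors: the list of
# _MetaInfoClassMember entries, the owning YANG module, the container/list name,
# its namespace, and the Python module that defines the bound class.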
_meta_table = {
'TelemetryStreamProtocolEnum' : _MetaInfoEnum('TelemetryStreamProtocolEnum', 'ydk.models.openconfig.openconfig_telemetry',
{
'TCP':'TCP',
'UDP':'UDP',
}, 'openconfig-telemetry', _yang_ns._namespaces['openconfig-telemetry']),
'TelemetrySystem.SensorGroups.SensorGroup.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.Config',
False,
[
_MetaInfoClassMember('sensor-group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name or identifier for the sensor group itself.
Will be referenced by other configuration specifying a
sensor group
''',
'sensor_group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.State',
False,
[
_MetaInfoClassMember('sensor-group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Name or identifier for the sensor group itself.
Will be referenced by other configuration specifying a
sensor group
''',
'sensor_group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.Config',
False,
[
_MetaInfoClassMember('exclude-filter', ATTRIBUTE, 'str' , None, None,
[], [],
''' Filter to exclude certain values out of the state
values
''',
'exclude_filter',
'openconfig-telemetry', False),
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' Path to a section of operational state of interest
(the sensor).
''',
'path',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.State',
False,
[
_MetaInfoClassMember('exclude-filter', ATTRIBUTE, 'str' , None, None,
[], [],
''' Filter to exclude certain values out of the state
values
''',
'exclude_filter',
'openconfig-telemetry', False),
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' Path to a section of operational state of interest
(the sensor).
''',
'path',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath',
False,
[
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the path of interest
''',
'path',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.Config',
[], [],
''' Configuration parameters to configure a set
of data model paths as a sensor grouping
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.State',
[], [],
''' Configuration parameters to configure a set
of data model paths as a sensor grouping
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-path',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup.SensorPaths',
False,
[
_MetaInfoClassMember('sensor-path', REFERENCE_LIST, 'SensorPath' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath',
[], [],
''' List of paths in the model which together
comprise a sensor grouping. Filters for each path
to exclude items are also provided.
''',
'sensor_path',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-paths',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups.SensorGroup' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups.SensorGroup',
False,
[
_MetaInfoClassMember('sensor-group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the name or identifier of the
sensor grouping
''',
'sensor_group_id',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.Config',
[], [],
''' Configuration parameters relating to the
telemetry sensor grouping
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('sensor-paths', REFERENCE_CLASS, 'SensorPaths' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.SensorPaths',
[], [],
''' Top level container to hold a set of sensor
paths grouped together
''',
'sensor_paths',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup.State',
[], [],
''' State information relating to the telemetry
sensor group
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-group',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.SensorGroups' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.SensorGroups',
False,
[
_MetaInfoClassMember('sensor-group', REFERENCE_LIST, 'SensorGroup' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups.SensorGroup',
[], [],
''' List of telemetry sensory groups on the local
system, where a sensor grouping represents a reusable
grouping of multiple paths and exclude filters.
''',
'sensor_group',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-groups',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.Config',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Unique identifier for the destination group
''',
'group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.State',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Unique identifier for destination group
''',
'group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.Config',
False,
[
_MetaInfoClassMember('destination-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False, [
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
]),
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Protocol (udp or tcp) port number for the telemetry
stream destination
''',
'destination_port',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-protocol', REFERENCE_ENUM_CLASS, 'TelemetryStreamProtocolEnum' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetryStreamProtocolEnum',
[], [],
''' Protocol used to transmit telemetry data to the
collector
''',
'destination_protocol',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.State',
False,
[
_MetaInfoClassMember('destination-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False, [
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
]),
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Protocol (udp or tcp) port number for the telemetry
stream destination
''',
'destination_port',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-protocol', REFERENCE_ENUM_CLASS, 'TelemetryStreamProtocolEnum' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetryStreamProtocolEnum',
[], [],
''' Protocol used to transmit telemetry data to the
collector
''',
'destination_protocol',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination',
False,
[
_MetaInfoClassMember('destination-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' Reference to the destination address of the
telemetry stream
''',
'destination_address',
'openconfig-telemetry', True, [
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' Reference to the destination address of the
telemetry stream
''',
'destination_address',
'openconfig-telemetry', True),
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' Reference to the destination address of the
telemetry stream
''',
'destination_address',
'openconfig-telemetry', True),
]),
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Reference to the port number of the stream
destination
''',
'destination_port',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.Config',
[], [],
''' Configuration parameters relating to
telemetry destinations
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.State',
[], [],
''' State information associated with
telemetry destinations
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destination',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup.Destinations',
False,
[
_MetaInfoClassMember('destination', REFERENCE_LIST, 'Destination' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination',
[], [],
''' List of telemetry stream destinations
''',
'destination',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destinations',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups.DestinationGroup' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups.DestinationGroup',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' Unique identifier for the destination group
''',
'group_id',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.Config',
[], [],
''' Top level config container for destination groups
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('destinations', REFERENCE_CLASS, 'Destinations' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.Destinations',
[], [],
''' The destination container lists the destination
information such as IP address and port of the
telemetry messages from the network element.
''',
'destinations',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup.State',
[], [],
''' Top level state container for destination groups
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destination-group',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.DestinationGroups' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.DestinationGroups',
False,
[
_MetaInfoClassMember('destination-group', REFERENCE_LIST, 'DestinationGroup' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups.DestinationGroup',
[], [],
''' List of destination-groups. Destination groups allow the
reuse of common telemetry destinations across the
telemetry configuration. An operator references a
set of destinations via the configurable
destination-group-identifier.
A destination group may contain one or more telemetry
destinations
''',
'destination_group',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destination-groups',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.Config',
False,
[
_MetaInfoClassMember('local-source-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False, [
_MetaInfoClassMember('local-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False),
_MetaInfoClassMember('local-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False),
]),
_MetaInfoClassMember('originated-qos-marking', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' DSCP marking of packets generated by the telemetry
subsystem on the network device.
''',
'originated_qos_marking',
'openconfig-telemetry', False),
_MetaInfoClassMember('subscription-id', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Identifier of the telemetry subscription.
Will be used by configuration operations needing
to modify or delete the telemetry subscription
''',
'subscription_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.State',
False,
[
_MetaInfoClassMember('local-source-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False, [
_MetaInfoClassMember('local-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False),
_MetaInfoClassMember('local-source-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' The IP address which will be the source of packets from
the device to a telemetry collector destination.
''',
'local_source_address',
'openconfig-telemetry', False),
]),
_MetaInfoClassMember('originated-qos-marking', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' DSCP marking of packets generated by the telemetry
subsystem on the network device.
''',
'originated_qos_marking',
'openconfig-telemetry', False),
_MetaInfoClassMember('subscription-id', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Identifier of the telemetry subscription.
Will be used by configuration operations needing
to modify or delete the telemetry subscription
''',
'subscription_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.Config',
False,
[
_MetaInfoClassMember('heartbeat-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Maximum time interval in seconds that may pass
between updates from a device to a telemetry collector.
If this interval expires, but there is no updated data to
send (such as if suppress_updates has been configured), the
device must send a telemetry message to the collector.
''',
'heartbeat_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('sample-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Time in milliseconds between the device's sample of a
telemetry data source. For example, setting this to 100
would require the local device to collect the telemetry
data every 100 milliseconds. There can be latency or jitter
in transmitting the data, but the sample must occur at
the specified interval.
The timestamp must reflect the actual time when the data
was sampled, not simply the previous sample timestamp +
sample-interval.
If sample-interval is set to 0, the telemetry sensor
becomes event based. The sensor must then emit data upon
every change of the underlying data source.
''',
'sample_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('sensor-group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the sensor group which is used in the profile
''',
'sensor_group',
'openconfig-telemetry', False),
_MetaInfoClassMember('suppress-redundant', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Boolean flag to control suppression of redundant
telemetry updates to the collector platform. If this flag is
set to TRUE, then the collector will only send an update at
the configured interval if a subscribed data value has
changed. Otherwise, the device will not send an update to
the collector until expiration of the heartbeat interval.
''',
'suppress_redundant',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.State',
False,
[
_MetaInfoClassMember('heartbeat-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Maximum time interval in seconds that may pass
between updates from a device to a telemetry collector.
If this interval expires, but there is no updated data to
send (such as if suppress_updates has been configured), the
device must send a telemetry message to the collector.
''',
'heartbeat_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('sample-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Time in milliseconds between the device's sample of a
telemetry data source. For example, setting this to 100
would require the local device to collect the telemetry
data every 100 milliseconds. There can be latency or jitter
in transmitting the data, but the sample must occur at
the specified interval.
The timestamp must reflect the actual time when the data
was sampled, not simply the previous sample timestamp +
sample-interval.
If sample-interval is set to 0, the telemetry sensor
becomes event based. The sensor must then emit data upon
every change of the underlying data source.
''',
'sample_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('sensor-group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the sensor group which is used in the profile
''',
'sensor_group',
'openconfig-telemetry', False),
_MetaInfoClassMember('suppress-redundant', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Boolean flag to control suppression of redundant
telemetry updates to the collector platform. If this flag is
set to TRUE, then the collector will only send an update at
the configured interval if a subscribed data value has
changed. Otherwise, the device will not send an update to
the collector until expiration of the heartbeat interval.
''',
'suppress_redundant',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile',
False,
[
_MetaInfoClassMember('sensor-group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the telemetry sensor group name
''',
'sensor_group',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.Config',
[], [],
''' Configuration parameters related to the sensor
profile for a subscription
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.State',
[], [],
''' State information relating to the sensor profile
for a subscription
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-profile',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles',
False,
[
_MetaInfoClassMember('sensor-profile', REFERENCE_LIST, 'SensorProfile' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile',
[], [],
''' List of telemetry sensor groups used
in the subscription
''',
'sensor_profile',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-profiles',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.Config' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.Config',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' The destination group id references a reusable
group of destination addresses and ports for
the telemetry stream.
''',
'group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'config',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.State',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' The destination group id references a reusable
group of destination addresses and ports for
the telemetry stream.
''',
'group_id',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup',
False,
[
_MetaInfoClassMember('group-id', ATTRIBUTE, 'str' , None, None,
[], [],
''' The destination group id references a configured
group of destinations for the telemetry stream.
''',
'group_id',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.Config',
[], [],
''' Configuration parameters related to telemetry
destinations.
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.State',
[], [],
''' State information related to telemetry
destinations
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destination-group',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups',
False,
[
_MetaInfoClassMember('destination-group', REFERENCE_LIST, 'DestinationGroup' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup',
[], [],
''' Identifier of the previously defined destination
group
''',
'destination_group',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'destination-groups',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent.Subscription' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent.Subscription',
False,
[
_MetaInfoClassMember('subscription-id', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Reference to the identifier of the subscription
itself. The id will be the handle to refer to the
subscription once created
''',
'subscription_id',
'openconfig-telemetry', True),
_MetaInfoClassMember('config', REFERENCE_CLASS, 'Config' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.Config',
[], [],
''' Config parameters relating to the telemetry
subscriptions on the local device
''',
'config',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-groups', REFERENCE_CLASS, 'DestinationGroups' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups',
[], [],
''' A subscription may specify destination addresses.
If the subscription supplies destination addresses,
the network element will be the initiator of the
telemetry streaming, sending it to the destination(s)
specified.
If the destination set is omitted, the subscription
preconfigures certain elements such as paths and
sample intervals under a specified subscription ID.
In this case, the network element will NOT initiate an
outbound connection for telemetry, but will wait for
an inbound connection from a network management
system.
It is expected that the network management system
connecting to the network element will reference
the preconfigured subscription ID when initiating
a subscription.
''',
'destination_groups',
'openconfig-telemetry', False),
_MetaInfoClassMember('sensor-profiles', REFERENCE_CLASS, 'SensorProfiles' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles',
[], [],
''' A sensor profile is a set of sensor groups or
individual sensor paths which are associated with a
telemetry subscription. This is the source of the
telemetry data for the subscription to send to the
defined collectors.
''',
'sensor_profiles',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription.State',
[], [],
''' State parameters relating to the telemetry
subscriptions on the local device
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'subscription',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Persistent' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Persistent',
False,
[
_MetaInfoClassMember('subscription', REFERENCE_LIST, 'Subscription' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent.Subscription',
[], [],
''' List of telemetry subscriptions. A telemetry
subscription consists of a set of collection
destinations, stream attributes, and associated paths to
state information in the model (sensor data)
''',
'subscription',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'persistent',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic.Subscription.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic.Subscription.State',
False,
[
_MetaInfoClassMember('destination-address', REFERENCE_UNION, 'str' , None, None,
[], [],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False, [
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'(([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\\.){3}([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-address', ATTRIBUTE, 'str' , None, None,
[], [b'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'],
''' IP address of the telemetry stream destination
''',
'destination_address',
'openconfig-telemetry', False),
]),
_MetaInfoClassMember('destination-port', ATTRIBUTE, 'int' , None, None,
[('0', '65535')], [],
''' Protocol (udp or tcp) port number for the telemetry
stream destination
''',
'destination_port',
'openconfig-telemetry', False),
_MetaInfoClassMember('destination-protocol', REFERENCE_ENUM_CLASS, 'TelemetryStreamProtocolEnum' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetryStreamProtocolEnum',
[], [],
''' Protocol used to transmit telemetry data to the
collector
''',
'destination_protocol',
'openconfig-telemetry', False),
_MetaInfoClassMember('heartbeat-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Maximum time interval in seconds that may pass
between updates from a device to a telemetry collector.
If this interval expires, but there is no updated data to
send (such as if suppress_updates has been configured), the
device must send a telemetry message to the collector.
''',
'heartbeat_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('originated-qos-marking', ATTRIBUTE, 'int' , None, None,
[('0', '63')], [],
''' DSCP marking of packets generated by the telemetry
subsystem on the network device.
''',
'originated_qos_marking',
'openconfig-telemetry', False),
_MetaInfoClassMember('sample-interval', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Time in milliseconds between the device's sample of a
telemetry data source. For example, setting this to 100
would require the local device to collect the telemetry
data every 100 milliseconds. There can be latency or jitter
in transmitting the data, but the sample must occur at
the specified interval.
The timestamp must reflect the actual time when the data
was sampled, not simply the previous sample timestamp +
sample-interval.
If sample-interval is set to 0, the telemetry sensor
becomes event based. The sensor must then emit data upon
every change of the underlying data source.
''',
'sample_interval',
'openconfig-telemetry', False),
_MetaInfoClassMember('subscription-id', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
                ''' Identifier of the telemetry subscription.
Will be used by configuration operations needing
to modify or delete the telemetry subscription
''',
'subscription_id',
'openconfig-telemetry', False),
_MetaInfoClassMember('suppress-redundant', ATTRIBUTE, 'bool' , None, None,
[], [],
''' Boolean flag to control suppression of redundant
telemetry updates to the collector platform. If this flag is
set to TRUE, then the collector will only send an update at
the configured interval if a subscribed data value has
changed. Otherwise, the device will not send an update to
the collector until expiration of the heartbeat interval.
''',
'suppress_redundant',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath.State' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath.State',
False,
[
_MetaInfoClassMember('exclude-filter', ATTRIBUTE, 'str' , None, None,
[], [],
''' Filter to exclude certain values out of the state
values
''',
'exclude_filter',
'openconfig-telemetry', False),
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' Path to a section of operational state of interest
(the sensor).
''',
'path',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'state',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath',
False,
[
_MetaInfoClassMember('path', ATTRIBUTE, 'str' , None, None,
[], [],
''' Reference to the path of interest
''',
'path',
'openconfig-telemetry', True),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath.State',
[], [],
''' State information for a dynamic subscription's
paths of interest
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-path',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths',
False,
[
_MetaInfoClassMember('sensor-path', REFERENCE_LIST, 'SensorPath' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath',
[], [],
''' List of paths in the model which together
comprise a sensor grouping. Filters for each path
to exclude items are also provided.
''',
'sensor_path',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'sensor-paths',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic.Subscription' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic.Subscription',
False,
[
_MetaInfoClassMember('subscription-id', ATTRIBUTE, 'int' , None, None,
[('0', '18446744073709551615')], [],
''' Reference to the identifier of the subscription
itself. The id will be the handle to refer to the
subscription once created
''',
'subscription_id',
'openconfig-telemetry', True),
_MetaInfoClassMember('sensor-paths', REFERENCE_CLASS, 'SensorPaths' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths',
[], [],
''' Top level container to hold a set of sensor
paths grouped together
''',
'sensor_paths',
'openconfig-telemetry', False),
_MetaInfoClassMember('state', REFERENCE_CLASS, 'State' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic.Subscription.State',
[], [],
''' State information relating to dynamic telemetry
subscriptions.
''',
'state',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'subscription',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions.Dynamic' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions.Dynamic',
False,
[
_MetaInfoClassMember('subscription', REFERENCE_LIST, 'Subscription' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic.Subscription',
[], [],
''' List representation of telemetry subscriptions that
are configured via an inline RPC, otherwise known
as dynamic telemetry subscriptions.
''',
'subscription',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'dynamic',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem.Subscriptions' : {
'meta_info' : _MetaInfoClass('TelemetrySystem.Subscriptions',
False,
[
_MetaInfoClassMember('dynamic', REFERENCE_CLASS, 'Dynamic' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Dynamic',
[], [],
''' This container holds information relating to dynamic
telemetry subscriptions. A dynamic subscription is
typically configured through an RPC channel, and does not
persist across device restarts, or if the RPC channel is
reset or otherwise torn down.
''',
'dynamic',
'openconfig-telemetry', False),
_MetaInfoClassMember('persistent', REFERENCE_CLASS, 'Persistent' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions.Persistent',
[], [],
''' This container holds information relating to persistent
telemetry subscriptions. A persistent telemetry
                subscription is configured locally on the device through
configuration, and is persistent across device restarts or
other redundancy changes.
''',
'persistent',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'subscriptions',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
'TelemetrySystem' : {
'meta_info' : _MetaInfoClass('TelemetrySystem',
False,
[
_MetaInfoClassMember('destination-groups', REFERENCE_CLASS, 'DestinationGroups' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.DestinationGroups',
[], [],
''' Top level container for destination group configuration
and state.
''',
'destination_groups',
'openconfig-telemetry', False),
_MetaInfoClassMember('sensor-groups', REFERENCE_CLASS, 'SensorGroups' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.SensorGroups',
[], [],
''' Top level container for sensor-groups.
''',
'sensor_groups',
'openconfig-telemetry', False),
_MetaInfoClassMember('subscriptions', REFERENCE_CLASS, 'Subscriptions' , 'ydk.models.openconfig.openconfig_telemetry', 'TelemetrySystem.Subscriptions',
[], [],
''' This container holds information for both persistent
and dynamic telemetry subscriptions.
''',
'subscriptions',
'openconfig-telemetry', False),
],
'openconfig-telemetry',
'telemetry-system',
_yang_ns._namespaces['openconfig-telemetry'],
'ydk.models.openconfig.openconfig_telemetry'
),
},
}
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.Config']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath.State']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths.SensorPath']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.Config']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.State']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup.SensorPaths']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups.SensorGroup']['meta_info']
_meta_table['TelemetrySystem.SensorGroups.SensorGroup']['meta_info'].parent =_meta_table['TelemetrySystem.SensorGroups']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.Config']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination.State']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations.Destination']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Config']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.State']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup.Destinations']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups.DestinationGroup']['meta_info'].parent =_meta_table['TelemetrySystem.DestinationGroups']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.Config']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile.State']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles.SensorProfile']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.Config']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup.State']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups.DestinationGroup']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.Config']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.State']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.SensorProfiles']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription.DestinationGroups']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent.Subscription']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Persistent']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath.State']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths.SensorPath']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.State']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription.SensorPaths']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic.Subscription']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions.Dynamic']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Persistent']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions']['meta_info']
_meta_table['TelemetrySystem.Subscriptions.Dynamic']['meta_info'].parent =_meta_table['TelemetrySystem.Subscriptions']['meta_info']
_meta_table['TelemetrySystem.SensorGroups']['meta_info'].parent =_meta_table['TelemetrySystem']['meta_info']
_meta_table['TelemetrySystem.DestinationGroups']['meta_info'].parent =_meta_table['TelemetrySystem']['meta_info']
_meta_table['TelemetrySystem.Subscriptions']['meta_info'].parent =_meta_table['TelemetrySystem']['meta_info']
|
|
"""Constants used for Coinbase."""
CONF_CURRENCIES = "account_balance_currencies"
CONF_EXCHANGE_BASE = "exchange_base"
CONF_EXCHANGE_RATES = "exchange_rate_currencies"
CONF_OPTIONS = "options"
CONF_TITLE = "title"
DOMAIN = "coinbase"
# These are constants used by the previous YAML configuration
CONF_YAML_API_TOKEN = "api_secret"
# Constants for data returned by Coinbase API
API_ACCOUNT_AMOUNT = "amount"
API_ACCOUNT_BALANCE = "balance"
API_ACCOUNT_CURRENCY = "currency"
API_ACCOUNT_ID = "id"
API_ACCOUNT_NATIVE_BALANCE = "native_balance"
API_ACCOUNT_NAME = "name"
API_ACCOUNTS_DATA = "data"
API_RATES = "rates"
API_RESOURCE_PATH = "resource_path"
API_RESOURCE_TYPE = "type"
API_TYPE_VAULT = "vault"
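# Illustrative sketch (added; not part of the original constants module).
# The API_* keys above index into the payload returned by Coinbase's
# v2 /accounts endpoint.  The response shape below is an assumption used
# only to show how the constants fit together:
#
#     accounts = response[API_ACCOUNTS_DATA]
#     for account in accounts:
#         if account[API_RESOURCE_TYPE] == API_TYPE_VAULT:
#             continue  # vault accounts are handled separately
#         amount = account[API_ACCOUNT_BALANCE][API_ACCOUNT_AMOUNT]
#         currency = account[API_ACCOUNT_BALANCE][API_ACCOUNT_CURRENCY]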
WALLETS = {
"1INCH": "1INCH",
"AAVE": "AAVE",
"ADA": "ADA",
"AED": "AED",
"AFN": "AFN",
"ALGO": "ALGO",
"ALL": "ALL",
"AMD": "AMD",
"AMP": "AMP",
"ANG": "ANG",
"ANKR": "ANKR",
"AOA": "AOA",
"ARS": "ARS",
"ATOM": "ATOM",
"AUCTION": "AUCTION",
"AUD": "AUD",
"AWG": "AWG",
"AZN": "AZN",
"BAL": "BAL",
"BAM": "BAM",
"BAND": "BAND",
"BAT": "BAT",
"BBD": "BBD",
"BCH": "BCH",
"BDT": "BDT",
"BGN": "BGN",
"BHD": "BHD",
"BIF": "BIF",
"BMD": "BMD",
"BND": "BND",
"BNT": "BNT",
"BOB": "BOB",
"BOND": "BOND",
"BRL": "BRL",
"BSD": "BSD",
"BSV": "BSV",
"BTC": "BTC",
"BTN": "BTN",
"BWP": "BWP",
"BYN": "BYN",
"BYR": "BYR",
"BZD": "BZD",
"CAD": "CAD",
"CDF": "CDF",
"CGLD": "CGLD",
"CHF": "CHF",
"CHZ": "CHZ",
"CLF": "CLF",
"CLP": "CLP",
"CLV": "CLV",
"CNH": "CNH",
"CNY": "CNY",
"COMP": "COMP",
"COP": "COP",
"CRC": "CRC",
"CRV": "CRV",
"CTSI": "CTSI",
"CUC": "CUC",
"CVC": "CVC",
"CVE": "CVE",
"CZK": "CZK",
"DAI": "DAI",
"DASH": "DASH",
"DJF": "DJF",
"DKK": "DKK",
"DNT": "DNT",
"DOGE": "DOGE",
"DOP": "DOP",
"DOT": "DOT",
"DZD": "DZD",
"EGP": "EGP",
"ENJ": "ENJ",
"EOS": "EOS",
"ERN": "ERN",
"ETB": "ETB",
"ETC": "ETC",
"ETH": "ETH",
"ETH2": "ETH2",
"EUR": "EUR",
"FET": "FET",
"FIL": "FIL",
"FJD": "FJD",
"FKP": "FKP",
"FORTH": "FORTH",
"GBP": "GBP",
"GBX": "GBX",
"GEL": "GEL",
"GGP": "GGP",
"GHS": "GHS",
"GIP": "GIP",
"GMD": "GMD",
"GNF": "GNF",
"GRT": "GRT",
"GTC": "GTC",
"GTQ": "GTQ",
"GYD": "GYD",
"HKD": "HKD",
"HNL": "HNL",
"HRK": "HRK",
"HTG": "HTG",
"HUF": "HUF",
"ICP": "ICP",
"IDR": "IDR",
"ILS": "ILS",
"IMP": "IMP",
"INR": "INR",
"IQD": "IQD",
"ISK": "ISK",
"JEP": "JEP",
"JMD": "JMD",
"JOD": "JOD",
"JPY": "JPY",
"KEEP": "KEEP",
"KES": "KES",
"KGS": "KGS",
"KHR": "KHR",
"KMF": "KMF",
"KNC": "KNC",
"KRW": "KRW",
"KWD": "KWD",
"KYD": "KYD",
"KZT": "KZT",
"LAK": "LAK",
"LBP": "LBP",
"LINK": "LINK",
"LKR": "LKR",
"LPT": "LPT",
"LRC": "LRC",
"LRD": "LRD",
"LSL": "LSL",
"LTC": "LTC",
"LYD": "LYD",
"MAD": "MAD",
"MANA": "MANA",
"MATIC": "MATIC",
"MDL": "MDL",
"MGA": "MGA",
"MIR": "MIR",
"MKD": "MKD",
"MKR": "MKR",
"MLN": "MLN",
"MMK": "MMK",
"MNT": "MNT",
"MOP": "MOP",
"MRO": "MRO",
"MTL": "MTL",
"MUR": "MUR",
"MVR": "MVR",
"MWK": "MWK",
"MXN": "MXN",
"MYR": "MYR",
"MZN": "MZN",
"NAD": "NAD",
"NGN": "NGN",
"NIO": "NIO",
"NKN": "NKN",
"NMR": "NMR",
"NOK": "NOK",
"NPR": "NPR",
"NU": "NU",
"NZD": "NZD",
"OGN": "OGN",
"OMG": "OMG",
"OMR": "OMR",
"OXT": "OXT",
"PAB": "PAB",
"PEN": "PEN",
"PGK": "PGK",
"PHP": "PHP",
"PKR": "PKR",
"PLN": "PLN",
"POLY": "POLY",
"PYG": "PYG",
"QAR": "QAR",
"QNT": "QNT",
"RLY": "RLY",
"REN": "REN",
"REP": "REP",
"REPV2": "REPV2",
"RLC": "RLC",
"RON": "RON",
"RSD": "RSD",
"RUB": "RUB",
"RWF": "RWF",
"SAR": "SAR",
"SBD": "SBD",
"SCR": "SCR",
"SEK": "SEK",
"SGD": "SGD",
"SHIB": "SHIB",
"SHP": "SHP",
"SKL": "SKL",
"SLL": "SLL",
"SNX": "SNX",
"SOL": "SOL",
"SOS": "SOS",
"SRD": "SRD",
"SSP": "SSP",
"STD": "STD",
"STORJ": "STORJ",
"SUSHI": "SUSHI",
"SVC": "SVC",
"SZL": "SZL",
"THB": "THB",
"TJS": "TJS",
"TMM": "TMM",
"TMT": "TMT",
"TND": "TND",
"TOP": "TOP",
"TRB": "TRB",
"TRY": "TRY",
"TTD": "TTD",
"TWD": "TWD",
"TZS": "TZS",
"UAH": "UAH",
"UGX": "UGX",
"UMA": "UMA",
"UNI": "UNI",
"USD": "USD",
"USDC": "USDC",
"USDT": "USDT",
"UYU": "UYU",
"UZS": "UZS",
"VES": "VES",
"VND": "VND",
"VUV": "VUV",
"WBTC": "WBTC",
"WST": "WST",
"XAF": "XAF",
"XAG": "XAG",
"XAU": "XAU",
"XCD": "XCD",
"XDR": "XDR",
"XLM": "XLM",
"XOF": "XOF",
"XPD": "XPD",
"XPF": "XPF",
"XPT": "XPT",
"XRP": "XRP",
"XTZ": "XTZ",
"YER": "YER",
"YFI": "YFI",
"ZAR": "ZAR",
"ZEC": "ZEC",
"ZMW": "ZMW",
"ZRX": "ZRX",
"ZWL": "ZWL",
}
RATES = {
"1INCH": "1INCH",
"AAVE": "AAVE",
"ADA": "ADA",
"AED": "AED",
"AFN": "AFN",
"ALGO": "ALGO",
"ALL": "ALL",
"AMD": "AMD",
"ANG": "ANG",
"ANKR": "ANKR",
"AOA": "AOA",
"ARS": "ARS",
"ATOM": "ATOM",
"AUCTION": "AUCTION",
"AUD": "AUD",
"AWG": "AWG",
"AZN": "AZN",
"BAL": "BAL",
"BAM": "BAM",
"BAND": "BAND",
"BAT": "BAT",
"BBD": "BBD",
"BCH": "BCH",
"BDT": "BDT",
"BGN": "BGN",
"BHD": "BHD",
"BIF": "BIF",
"BMD": "BMD",
"BND": "BND",
"BNT": "BNT",
"BOB": "BOB",
"BRL": "BRL",
"BSD": "BSD",
"BSV": "BSV",
"BTC": "BTC",
"BTN": "BTN",
"BWP": "BWP",
"BYN": "BYN",
"BYR": "BYR",
"BZD": "BZD",
"CAD": "CAD",
"CDF": "CDF",
"CGLD": "CGLD",
"CHF": "CHF",
"CLF": "CLF",
"CLP": "CLP",
"CLV": "CLV",
"CNH": "CNH",
"CNY": "CNY",
"COMP": "COMP",
"COP": "COP",
"CRC": "CRC",
"CRV": "CRV",
"CUC": "CUC",
"CVC": "CVC",
"CVE": "CVE",
"CZK": "CZK",
"DAI": "DAI",
"DASH": "DASH",
"DJF": "DJF",
"DKK": "DKK",
"DNT": "DNT",
"DOP": "DOP",
"DZD": "DZD",
"EGP": "EGP",
"ENJ": "ENJ",
"EOS": "EOS",
"ERN": "ERN",
"ETB": "ETB",
"ETC": "ETC",
"ETH": "ETH",
"ETH2": "ETH2",
"EUR": "EUR",
"FET": "FET",
"FIL": "FIL",
"FJD": "FJD",
"FKP": "FKP",
"FORTH": "FORTH",
"GBP": "GBP",
"GBX": "GBX",
"GEL": "GEL",
"GGP": "GGP",
"GHS": "GHS",
"GIP": "GIP",
"GMD": "GMD",
"GNF": "GNF",
"GRT": "GRT",
"GTQ": "GTQ",
"GYD": "GYD",
"HKD": "HKD",
"HNL": "HNL",
"HRK": "HRK",
"HTG": "HTG",
"HUF": "HUF",
"IDR": "IDR",
"ILS": "ILS",
"IMP": "IMP",
"INR": "INR",
"IQD": "IQD",
"ISK": "ISK",
"JEP": "JEP",
"JMD": "JMD",
"JOD": "JOD",
"JPY": "JPY",
"KES": "KES",
"KGS": "KGS",
"KHR": "KHR",
"KMF": "KMF",
"KNC": "KNC",
"KRW": "KRW",
"KWD": "KWD",
"KYD": "KYD",
"KZT": "KZT",
"LAK": "LAK",
"LBP": "LBP",
"LINK": "LINK",
"LKR": "LKR",
"LRC": "LRC",
"LRD": "LRD",
"LSL": "LSL",
"LTC": "LTC",
"LYD": "LYD",
"MAD": "MAD",
"MANA": "MANA",
"MATIC": "MATIC",
"MDL": "MDL",
"MGA": "MGA",
"MKD": "MKD",
"MKR": "MKR",
"MMK": "MMK",
"MNT": "MNT",
"MOP": "MOP",
"MRO": "MRO",
"MTL": "MTL",
"MUR": "MUR",
"MVR": "MVR",
"MWK": "MWK",
"MXN": "MXN",
"MYR": "MYR",
"MZN": "MZN",
"NAD": "NAD",
"NGN": "NGN",
"NIO": "NIO",
"NKN": "NKN",
"NMR": "NMR",
"NOK": "NOK",
"NPR": "NPR",
"NU": "NU",
"NZD": "NZD",
"OGN": "OGN",
"OMG": "OMG",
"OMR": "OMR",
"OXT": "OXT",
"PAB": "PAB",
"PEN": "PEN",
"PGK": "PGK",
"PHP": "PHP",
"PKR": "PKR",
"PLN": "PLN",
"POLY": "POLY",
"PYG": "PYG",
"QAR": "QAR",
"RLY": "RLY",
"REN": "REN",
"REP": "REP",
"RON": "RON",
"RSD": "RSD",
"RUB": "RUB",
"RWF": "RWF",
"SAR": "SAR",
"SBD": "SBD",
"SCR": "SCR",
"SEK": "SEK",
"SGD": "SGD",
"SHIB": "SHIB",
"SHP": "SHP",
"SKL": "SKL",
"SLL": "SLL",
"SNX": "SNX",
"SOS": "SOS",
"SRD": "SRD",
"SSP": "SSP",
"STD": "STD",
"STORJ": "STORJ",
"SUSHI": "SUSHI",
"SVC": "SVC",
"SZL": "SZL",
"THB": "THB",
"TJS": "TJS",
"TMT": "TMT",
"TND": "TND",
"TOP": "TOP",
"TRY": "TRY",
"TTD": "TTD",
"TWD": "TWD",
"TZS": "TZS",
"UAH": "UAH",
"UGX": "UGX",
"UMA": "UMA",
"UNI": "UNI",
"USD": "USD",
"USDC": "USDC",
"UYU": "UYU",
"UZS": "UZS",
"VES": "VES",
"VND": "VND",
"VUV": "VUV",
"WBTC": "WBTC",
"WST": "WST",
"XAF": "XAF",
"XAG": "XAG",
"XAU": "XAU",
"XCD": "XCD",
"XDR": "XDR",
"XLM": "XLM",
"XOF": "XOF",
"XPD": "XPD",
"XPF": "XPF",
"XPT": "XPT",
"XTZ": "XTZ",
"YER": "YER",
"YFI": "YFI",
"ZAR": "ZAR",
"ZEC": "ZEC",
"ZMW": "ZMW",
"ZRX": "ZRX",
"ZWL": "ZWL",
}
|
|
import math
import random
import time
from cspace import CSpace
def default_sampleneighborhood(c,r):
return [ci + random.uniform(-r,r) for ci in c]
def default_visible(a,b):
raise RuntimeError("Can't check visibility")
def default_distance(a,b):
    return math.sqrt(sum((ai-bi)**2 for (ai,bi) in zip(a,b)))
def default_interpolate(a,b,u):
return [ai+u*(bi-ai) for (ai,bi) in zip(a,b)]
def makedefault(space):
"""Helper: makes a space's callbacks perform the default Cartesian space
operations."""
space.sampleneighborhood = default_sampleneighborhood
space.visible = default_visible
space.distance = default_distance
space.interpolate = default_interpolate
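def _default_space_example():
    """Usage sketch (added for illustration only; not part of the original
    module): wire the default Cartesian callbacks into a plain CSpace.
    Assumes klampt's CSpace as imported above."""
    space = CSpace()
    space.bound = [(0.0, 1.0), (0.0, 1.0)]
    makedefault(space)
    a, b = [0.0, 0.0], [1.0, 1.0]
    # distance is Euclidean, interpolation is straight-line
    return space.distance(a, b), space.interpolate(a, b, 0.5)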
class CompositeCSpace(CSpace):
"""A cartesian product of multiple spaces, given as a list upon
construction. The feasible method can be overloaded to include
interaction tests."""
def __init__(self,spaces):
CSpace.__init__(self)
self.spaces = spaces
#construct optional methods
def sampleneighborhood(c,r):
return self.join(s.sampleneighborhood(cs,r) for (s,cs) in zip(self.spaces,self.split(c)))
def visible(a,b):
return all(s.visible(ai,bi) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))
def distance(a,b):
return sum(s.distance(ai,bi) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))
def interpolate(a,b,u):
return self.join(s.interpolate(ai,bi,u) for (s,ai,bi) in zip(self.spaces,self.split(a),self.split(b)))
if any(hasattr(s,'sampleneighborhood') for s in spaces):
for s in self.spaces:
if not hasattr(s,'sampleneighborhood'):
                    s.sampleneighborhood = default_sampleneighborhood
self.sampleneighborhood = sampleneighborhood
if any(hasattr(s,'visible') for s in spaces):
for s in self.spaces:
if not hasattr(s,'visible'):
                    s.visible = default_visible
self.visible = visible
if any(hasattr(s,'distance') for s in spaces):
for s in self.spaces:
if not hasattr(s,'distance'):
                    s.distance = default_distance
self.distance = distance
if any(hasattr(s,'interpolate') for s in spaces):
for s in self.spaces:
if not hasattr(s,'interpolate'):
                    s.interpolate = default_interpolate
self.interpolate = interpolate
def subDims(self):
return [len(s.sample()) for s in self.spaces]
def split(self,x):
d = self.subDims()
res = []
pos = 0
for di in d:
res.append(x[pos:pos+di])
pos += di
return res
def join(self,xs):
res = []
for x in xs:
res += x
return res
def feasible(self,x):
for (xi,si) in zip(self.split(x),self.spaces):
if not si.feasible(xi):
return False
return True
def sample(self):
return self.join(s.sample() for s in self.spaces)
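def _composite_space_example(space_a, space_b):
    """Usage sketch (added for illustration only; not part of the original
    module): take the Cartesian product of two CSpace instances and sample
    a joint configuration.  space_a and space_b are assumed to implement
    sample() and feasible()."""
    product = CompositeCSpace([space_a, space_b])
    x = product.sample()          # one sample per subspace, concatenated
    parts = product.split(x)      # recover the per-subspace configurations
    return product.feasible(x), parts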
class EmbeddedCSpace(CSpace):
"""A subspace of an ambient space, with the active DOFs given by a list
of DOF indices of that ambient space.
Attributes:
- ambientspace: the ambient configuration space
- mapping: the list of active indices into the ambient configuration
space
- xinit: the initial configuration in the ambient space (by default, 0)
"""
def __init__(self,ambientspace,subset,xinit=None):
CSpace.__init__(self)
self.ambientspace = ambientspace
n = len(ambientspace.sample())
self.mapping = subset
#start at the zero config if no initial configuration is given
        if xinit is None:
self.xinit = [0.0]*n
else:
self.xinit = xinit
#construct optional methods
def sampleneighborhood(c,r):
return self.project(self.ambientspace.sampleneighborhood(self.lift(c),r))
def visible(a,b):
return self.ambientspace.visible(self.lift(a),self.lift(b))
def distance(a,b):
return self.ambientspace.distance(self.lift(a),self.lift(b))
def interpolate(a,b,u):
            return self.project(self.ambientspace.interpolate(self.lift(a),self.lift(b),u))
if hasattr(ambientspace,'sampleneighborhood'):
self.sampleneighborhood = sampleneighborhood
if hasattr(ambientspace,'visible'):
self.visible = visible
if hasattr(ambientspace,'distance'):
self.distance = distance
if hasattr(ambientspace,'interpolate'):
self.interpolate = interpolate
self.eps = self.ambientspace.eps
self.bound = [self.ambientspace.bound[i] for i in self.mapping]
self.properties = self.ambientspace.properties
if self.ambientspace.feasibilityTests is not None:
            self.feasibilityTests = [(lambda x, f=f: f(self.lift(x))) for f in self.ambientspace.feasibilityTests]
            self.feasibilityTestNames = self.ambientspace.feasibilityTestNames[:]
def project(self,xamb):
"""Ambient space -> embedded space"""
return [xamb[i] for i in self.mapping]
def lift(self,xemb):
"""Embedded space -> ambient space"""
xamb = self.xinit[:]
for (i,j) in enumerate(self.mapping):
xamb[j] = xemb[i]
return xamb
def feasible(self,x):
return self.ambientspace.feasible(self.lift(x))
def sample(self):
return self.project(self.ambientspace.sample())
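def _embedded_space_example(ambient):
    """Usage sketch (added for illustration only; not part of the original
    module): plan over DOFs 0 and 2 of an ambient CSpace, holding all other
    DOFs at the (zero) initial configuration."""
    sub = EmbeddedCSpace(ambient, subset=[0, 2])
    xemb = sub.sample()           # configuration over the active DOFs only
    xamb = sub.lift(xemb)         # padded back into the ambient space
    assert sub.project(xamb) == xemb
    return sub.feasible(xemb)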
class ZeroTest:
"""A test that evaluates to 0 at the feasible set"""
def __init__(self):
self.name = '0'
self.type = 'constant'
        self.dist = lambda x: 1 if x != 0 else 0
def __call__(self,*args):
return self.dist(*args)
def setConstant(self,val):
self.name = str(val)
self.type = 'constant'
        if isinstance(val,(int,float)):
            self.dist = lambda x: x-val
        else:
            self.dist = lambda x: 1 if x != val else 0
def setCondition(self,f,name=None):
"""Let this be 0 whenever f evaluates to True"""
if name: self.name = name
else: self.name = "f(x)"
self.f = f
        self.dist = lambda x: 0 if f(x) else 1
def setComparison(self,f,cmp,rhs,name=None):
if name: self.name = name+cmp+str(rhs)
else: self.name = "f(x)"+cmp+str(rhs)
self.f = f
self.cmp = cmp
self.rhs = rhs
def lessPenalty(x,y):
if isinstance(x,int) and isinstance(y,int):
return max(1+x-y,0)
else:
return max(x-y,0)
def greaterPenalty(x,y):
if isinstance(x,int) and isinstance(y,int):
return max(1+y-x,0)
else:
return max(y-x,0)
comparisons = {'==':lambda x,y:abs(x-y),
'>=':lambda x,y:max(y-x,0),
'<=':lambda x,y:max(x-y,0),
'<':lessPenalty,
'>':greaterPenalty}
        self.dist = lambda x: comparisons[cmp](f(x), rhs)
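def _zero_test_example():
    """Usage sketch (added for illustration only; not part of the original
    module): with the one-argument dist wiring above, this test is zero
    exactly when f(x) <= 3 and returns the violation amount otherwise."""
    t = ZeroTest()
    t.setComparison(lambda x: x, '<=', 3, name='x')
    return t(1.0), t(5.0)   # -> (0, 2.0)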
class AdaptiveZeroTester:
"""Tests a set of tests f1(x),...,fn(x) for equality to zero.
Maintains statistics about evaluation time, success rate,
max/average deviation from zero. The stats are then used to determine
the optimal testing order.
"""
def __init__(self):
self.tests = []
self.test_ids = []
def add_test(self,f,id=None):
"""Adds an instance of the ZeroTest f to the AdaptiveZeroTester"""
if id is None:
id = len(self.tests)
self.tests.append(f)
self.test_ids.append(id)
self.reset_history(self.tests[-1])
def update_order(self):
"""Given the empirical costs / failures of testing, returns the optimal
order to find the first failure."""
thelist = [(f._sum_cost/f._num_fail,id,f) for f,id in zip(self.tests,self.test_ids)]
thelist = sorted(thelist)
self.tests = [item[2] for item in thelist]
self.test_ids = [item[1] for item in thelist]
def testmax(self,*args):
"""Tests all tests, returning the max absolute deviation from 0."""
vmax = 0.0
for f in self.tests:
t1 = time.time()
res = f(*args)
t2 = time.time()
self.update_stats(f,t2-t1,res)
vmax = max(vmax,abs(res))
self.update_order()
return vmax
def test(self,*args):
"""Tests whether *args passes all tests. Updates the stats and
internal order"""
for f in self.tests:
t1 = time.time()
res = f(*args)
t2 = time.time()
self.update_stats(f,t2-t1,res)
if res != 0:
self.update_order()
return False
self.update_order()
return True
def expectation(self):
"""Returns (expected cost, expected success) of testing all tests
in the current order."""
c = 0.0
p = 1.0
for f in self.tests:
avgcost = f._sum_cost/(f._num_pass+f._num_fail)
failrate = float(f._num_fail)/(f._num_pass+f._num_fail)
c += p*avgcost
p *= failrate
return (c,p)
def reset_history(self,f,avg_cost=1.0,pr_pass=0.5,evidence=2.0):
f._sum_cost = avg_cost*evidence
f._num_pass = pr_pass*evidence
f._num_fail = (1.0-pr_pass)*evidence
f._max_dist = 0.
f._sum_dist = 0.
def update_stats(self,f,cost,res):
f._sum_cost += cost
if res==0: f._num_pass += 1
else:
f._num_fail += 1
f._max_dist = max(f._max_dist,abs(res))
f._sum_dist += abs(res)
def stats(self,f):
"""Returns a dictionary describing the statistics of f"""
res = dict()
res['average cost']=f._sum_cost/(f._num_pass+f._num_fail)
res['pass rate']=float(f._num_pass)/(f._num_pass+f._num_fail)
res['evaluations']=f._num_pass+f._num_fail
res['max distance']=f._max_dist
res['average distance']=float(f._sum_dist)/(f._num_pass+f._num_fail)
return res
def init_stats(self,f,d):
"""Given a dictionary returned by a stats() call, fills in the
appropriate statistics of f"""
f._sum_cost = d['average cost']*d['evaluations']
f._num_pass = d['evaluations']*d['pass rate']
f._num_fail = d['evaluations']*(1.0-d['pass rate'])
f._max_dist = d['max distance']
f._sum_dist = d['average distance']*d['evaluations']
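def _adaptive_tester_example(samples):
    """Usage sketch (added for illustration only; not part of the original
    module): register two tests and let the tester reorder them from the
    observed cost and failure statistics."""
    tester = AdaptiveZeroTester()
    nonzero = ZeroTest()                       # zero only when x == 0
    nonneg = ZeroTest()
    nonneg.setCondition(lambda x: x >= 0, name='nonneg')
    tester.add_test(nonzero, id='nonzero')
    tester.add_test(nonneg, id='nonneg')
    results = [tester.test(x) for x in samples]
    return results, tester.expectation()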
class AdaptiveCSpace(CSpace,AdaptiveZeroTester):
"""A cspace with an adaptive feasibility checker. Subclasses
fill out feasibility tests using addFeasibleTest (binary conditions)
or addFeasibleComp (inequalities). The cspace will then learn the
characteristics of each test and find the (near) optimal testing
order."""
def __init__(self):
CSpace.__init__(self)
AdaptiveZeroTester.__init__(self)
def addFeasibleTest(self,f,name):
t = ZeroTest()
t.setCondition(f,name)
self.add_test(t,name)
def addFeasibleComp(self,f,cmp,val,name):
t = ZeroTest()
t.setComparison(f,cmp,val,name)
self.add_test(t)
def feasible(self,x):
return self.test(x)
def stats(self):
"""Retreives the feasibility test stats."""
res = dict()
for t in self.tests:
res[t.name] = AdaptiveZeroTester.stats(self,t)
return res
def init_stats(self,d):
"""Given a dictionary of (name,dict) pairs returned from stats(),
initializes the zero tester stats."""
        for (k,v) in d.items():
found = False
for t in self.tests:
if t.name == k:
AdaptiveZeroTester.init_stats(self,t,v)
found = True
break
            if not found:
                raise RuntimeError("init_stats: key '"+k+"' not found")
|
|
'''Base classes and helpers for building zone specific tzinfo classes'''
from datetime import datetime, timedelta, tzinfo
from bisect import bisect_right
try:
set
except NameError:
from sets import Set as set
import pytz
from pytz.exceptions import AmbiguousTimeError, NonExistentTimeError
__all__ = []
_timedelta_cache = {}
def memorized_timedelta(seconds):
'''Create only one instance of each distinct timedelta'''
try:
return _timedelta_cache[seconds]
except KeyError:
delta = timedelta(seconds=seconds)
_timedelta_cache[seconds] = delta
return delta
_epoch = datetime.utcfromtimestamp(0)
_datetime_cache = {0: _epoch}
def memorized_datetime(seconds):
'''Create only one instance of each distinct datetime'''
try:
return _datetime_cache[seconds]
except KeyError:
# NB. We can't just do datetime.utcfromtimestamp(seconds) as this
# fails with negative values under Windows (Bug #90096)
dt = _epoch + timedelta(seconds=seconds)
_datetime_cache[seconds] = dt
return dt
_ttinfo_cache = {}
def memorized_ttinfo(*args):
'''Create only one instance of each distinct tuple'''
try:
return _ttinfo_cache[args]
except KeyError:
ttinfo = (
memorized_timedelta(args[0]),
memorized_timedelta(args[1]),
args[2]
)
_ttinfo_cache[args] = ttinfo
return ttinfo
_notime = memorized_timedelta(0)
def _to_seconds(td):
'''Convert a timedelta to seconds'''
return td.seconds + td.days * 24 * 60 * 60
class BaseTzInfo(tzinfo):
# Overridden in subclass
_utcoffset = None
_tzname = None
zone = None
def __str__(self):
return self.zone
class StaticTzInfo(BaseTzInfo):
'''A timezone that has a constant offset from UTC
These timezones are rare, as most locations have changed their
offset at some point in their history
'''
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo is not self:
raise ValueError('fromutc: dt.tzinfo is not self')
return (dt + self._utcoffset).replace(tzinfo=self)
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return _notime
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
is_dst is ignored for StaticTzInfo, and exists only to
retain compatibility with DstTzInfo.
'''
return self._tzname
def localize(self, dt, is_dst=False):
'''Convert naive time to local time'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
return dt.replace(tzinfo=self)
def normalize(self, dt, is_dst=False):
'''Correct the timezone information on the given datetime.
This is normally a no-op, as StaticTzInfo timezones never have
ambiguous cases to correct:
>>> from pytz import timezone
>>> gmt = timezone('GMT')
>>> isinstance(gmt, StaticTzInfo)
True
>>> dt = datetime(2011, 5, 8, 1, 2, 3, tzinfo=gmt)
>>> gmt.normalize(dt) is dt
True
The supported method of converting between timezones is to use
datetime.astimezone(). Currently normalize() also works:
>>> la = timezone('America/Los_Angeles')
>>> dt = la.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> gmt.normalize(dt).strftime(fmt)
'2011-05-07 08:02:03 GMT (+0000)'
'''
if dt.tzinfo is self:
return dt
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
return dt.astimezone(self)
def __repr__(self):
return '<StaticTzInfo %r>' % (self.zone,)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
# database changes.
return pytz._p, (self.zone,)
class DstTzInfo(BaseTzInfo):
'''A timezone that has a variable offset from UTC
The offset might change if daylight savings time comes into effect,
or at a point in history when the region decides to change their
timezone definition.
'''
# Overridden in subclass
_utc_transition_times = None # Sorted list of DST transition times in UTC
_transition_info = None # [(utcoffset, dstoffset, tzname)] corresponding
# to _utc_transition_times entries
zone = None
# Set in __init__
_tzinfos = None
_dst = None # DST offset
def __init__(self, _inf=None, _tzinfos=None):
if _inf:
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = _inf
else:
_tzinfos = {}
self._tzinfos = _tzinfos
self._utcoffset, self._dst, self._tzname = self._transition_info[0]
_tzinfos[self._transition_info[0]] = self
for inf in self._transition_info[1:]:
if inf not in _tzinfos:
_tzinfos[inf] = self.__class__(inf, _tzinfos)
def fromutc(self, dt):
'''See datetime.tzinfo.fromutc'''
if dt.tzinfo is not None and dt.tzinfo._tzinfos is not self._tzinfos:
raise ValueError('fromutc: dt.tzinfo is not self')
dt = dt.replace(tzinfo=None)
idx = max(0, bisect_right(self._utc_transition_times, dt) - 1)
inf = self._transition_info[idx]
return (dt + inf[0]).replace(tzinfo=self._tzinfos[inf])
def normalize(self, dt):
'''Correct the timezone information on the given datetime
If date arithmetic crosses DST boundaries, the tzinfo
is not magically adjusted. This method normalizes the
tzinfo to the correct one.
To test, first we need to do some setup
>>> from pytz import timezone
>>> utc = timezone('UTC')
>>> eastern = timezone('US/Eastern')
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
We next create a datetime right on an end-of-DST transition point,
the instant when the wallclocks are wound back one hour.
>>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
>>> loc_dt = utc_dt.astimezone(eastern)
>>> loc_dt.strftime(fmt)
'2002-10-27 01:00:00 EST (-0500)'
Now, if we subtract a few minutes from it, note that the timezone
information has not changed.
>>> before = loc_dt - timedelta(minutes=10)
>>> before.strftime(fmt)
'2002-10-27 00:50:00 EST (-0500)'
But we can fix that by calling the normalize method
>>> before = eastern.normalize(before)
>>> before.strftime(fmt)
'2002-10-27 01:50:00 EDT (-0400)'
The supported method of converting between timezones is to use
datetime.astimezone(). Currently, normalize() also works:
>>> th = timezone('Asia/Bangkok')
>>> am = timezone('Europe/Amsterdam')
>>> dt = th.localize(datetime(2011, 5, 7, 1, 2, 3))
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> am.normalize(dt).strftime(fmt)
'2011-05-06 20:02:03 CEST (+0200)'
'''
if dt.tzinfo is None:
raise ValueError('Naive time - no tzinfo set')
# Convert dt in localtime to UTC
offset = dt.tzinfo._utcoffset
dt = dt.replace(tzinfo=None)
dt = dt - offset
# convert it back, and return it
return self.fromutc(dt)
def localize(self, dt, is_dst=False):
'''Convert naive time to local time.
This method should be used to construct localtimes, rather
than passing a tzinfo argument to a datetime constructor.
        is_dst is used to determine the correct timezone in the ambiguous
period at the end of daylight savings time.
>>> from pytz import timezone
>>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
>>> amdam = timezone('Europe/Amsterdam')
>>> dt = datetime(2004, 10, 31, 2, 0, 0)
>>> loc_dt1 = amdam.localize(dt, is_dst=True)
>>> loc_dt2 = amdam.localize(dt, is_dst=False)
>>> loc_dt1.strftime(fmt)
'2004-10-31 02:00:00 CEST (+0200)'
>>> loc_dt2.strftime(fmt)
'2004-10-31 02:00:00 CET (+0100)'
>>> str(loc_dt2 - loc_dt1)
'1:00:00'
Use is_dst=None to raise an AmbiguousTimeError for ambiguous
times at the end of daylight savings
>>> try:
... loc_dt1 = amdam.localize(dt, is_dst=None)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
is_dst defaults to False
>>> amdam.localize(dt) == amdam.localize(dt, False)
True
is_dst is also used to determine the correct timezone in the
wallclock times jumped over at the start of daylight savings time.
>>> pacific = timezone('US/Pacific')
>>> dt = datetime(2008, 3, 9, 2, 0, 0)
>>> ploc_dt1 = pacific.localize(dt, is_dst=True)
>>> ploc_dt2 = pacific.localize(dt, is_dst=False)
>>> ploc_dt1.strftime(fmt)
'2008-03-09 02:00:00 PDT (-0700)'
>>> ploc_dt2.strftime(fmt)
'2008-03-09 02:00:00 PST (-0800)'
>>> str(ploc_dt2 - ploc_dt1)
'1:00:00'
Use is_dst=None to raise a NonExistentTimeError for these skipped
times.
>>> try:
... loc_dt1 = pacific.localize(dt, is_dst=None)
... except NonExistentTimeError:
... print('Non-existent')
Non-existent
'''
if dt.tzinfo is not None:
raise ValueError('Not naive datetime (tzinfo is already set)')
# Find the two best possibilities.
possible_loc_dt = set()
for delta in [timedelta(days=-1), timedelta(days=1)]:
loc_dt = dt + delta
idx = max(0, bisect_right(
self._utc_transition_times, loc_dt) - 1)
inf = self._transition_info[idx]
tzinfo = self._tzinfos[inf]
loc_dt = tzinfo.normalize(dt.replace(tzinfo=tzinfo))
if loc_dt.replace(tzinfo=None) == dt:
possible_loc_dt.add(loc_dt)
if len(possible_loc_dt) == 1:
return possible_loc_dt.pop()
# If there are no possibly correct timezones, we are attempting
# to convert a time that never happened - the time period jumped
# during the start-of-DST transition period.
if len(possible_loc_dt) == 0:
# If we refuse to guess, raise an exception.
if is_dst is None:
raise NonExistentTimeError(dt)
# If we are forcing the pre-DST side of the DST transition, we
# obtain the correct timezone by winding the clock forward a few
# hours.
elif is_dst:
return self.localize(
dt + timedelta(hours=6), is_dst=True) - timedelta(hours=6)
# If we are forcing the post-DST side of the DST transition, we
# obtain the correct timezone by winding the clock back.
else:
return self.localize(
dt - timedelta(hours=6), is_dst=False) + timedelta(hours=6)
# If we get this far, we have multiple possible timezones - this
        # is an ambiguous case occurring during the end-of-DST transition.
# If told to be strict, raise an exception since we have an
# ambiguous case
if is_dst is None:
raise AmbiguousTimeError(dt)
        # Filter out the possibilities that don't match the requested
# is_dst
filtered_possible_loc_dt = [
p for p in possible_loc_dt
if bool(p.tzinfo._dst) == is_dst
]
# Hopefully we only have one possibility left. Return it.
if len(filtered_possible_loc_dt) == 1:
return filtered_possible_loc_dt[0]
if len(filtered_possible_loc_dt) == 0:
filtered_possible_loc_dt = list(possible_loc_dt)
        # If we get this far, we are in a weird timezone transition
# where the clocks have been wound back but is_dst is the same
# in both (eg. Europe/Warsaw 1915 when they switched to CET).
# At this point, we just have to guess unless we allow more
# hints to be passed in (such as the UTC offset or abbreviation),
# but that is just getting silly.
#
# Choose the earliest (by UTC) applicable timezone.
sorting_keys = {}
for local_dt in filtered_possible_loc_dt:
key = local_dt.replace(tzinfo=None) - local_dt.tzinfo._utcoffset
sorting_keys[key] = local_dt
first_key = sorted(sorting_keys)[0]
return sorting_keys[first_key]
def utcoffset(self, dt, is_dst=None):
'''See datetime.tzinfo.utcoffset
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.utcoffset(ambiguous, is_dst=False)
datetime.timedelta(-1, 73800)
>>> tz.utcoffset(ambiguous, is_dst=True)
datetime.timedelta(-1, 77400)
>>> try:
... tz.utcoffset(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._utcoffset
else:
return self._utcoffset
def dst(self, dt, is_dst=None):
'''See datetime.tzinfo.dst
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.dst(normal)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=False)
datetime.timedelta(0, 3600)
>>> tz.dst(normal, is_dst=True)
datetime.timedelta(0, 3600)
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.dst(ambiguous, is_dst=False)
datetime.timedelta(0)
>>> tz.dst(ambiguous, is_dst=True)
datetime.timedelta(0, 3600)
>>> try:
... tz.dst(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return None
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._dst
else:
return self._dst
def tzname(self, dt, is_dst=None):
'''See datetime.tzinfo.tzname
The is_dst parameter may be used to remove ambiguity during DST
transitions.
>>> from pytz import timezone
>>> tz = timezone('America/St_Johns')
>>> normal = datetime(2009, 9, 1)
>>> tz.tzname(normal)
'NDT'
>>> tz.tzname(normal, is_dst=False)
'NDT'
>>> tz.tzname(normal, is_dst=True)
'NDT'
>>> ambiguous = datetime(2009, 10, 31, 23, 30)
>>> tz.tzname(ambiguous, is_dst=False)
'NST'
>>> tz.tzname(ambiguous, is_dst=True)
'NDT'
>>> try:
... tz.tzname(ambiguous)
... except AmbiguousTimeError:
... print('Ambiguous')
Ambiguous
'''
if dt is None:
return self.zone
elif dt.tzinfo is not self:
dt = self.localize(dt, is_dst)
return dt.tzinfo._tzname
else:
return self._tzname
def __repr__(self):
if self._dst:
dst = 'DST'
else:
dst = 'STD'
if self._utcoffset > _notime:
return '<DstTzInfo %r %s+%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
else:
return '<DstTzInfo %r %s%s %s>' % (
self.zone, self._tzname, self._utcoffset, dst
)
def __reduce__(self):
        # Special pickle so the zone remains a singleton and to cope with
# database changes.
return pytz._p, (
self.zone,
_to_seconds(self._utcoffset),
_to_seconds(self._dst),
self._tzname
)
def unpickler(zone, utcoffset=None, dstoffset=None, tzname=None):
"""Factory function for unpickling pytz tzinfo instances.
This is shared for both StaticTzInfo and DstTzInfo instances, because
    database changes could cause a zone's implementation to switch between
these two base classes and we can't break pickles on a pytz version
upgrade.
"""
# Raises a KeyError if zone no longer exists, which should never happen
# and would be a bug.
tz = pytz.timezone(zone)
# A StaticTzInfo - just return it
if utcoffset is None:
return tz
# This pickle was created from a DstTzInfo. We need to
# determine which of the list of tzinfo instances for this zone
# to use in order to restore the state of any datetime instances using
# it correctly.
utcoffset = memorized_timedelta(utcoffset)
dstoffset = memorized_timedelta(dstoffset)
try:
return tz._tzinfos[(utcoffset, dstoffset, tzname)]
except KeyError:
# The particular state requested in this timezone no longer exists.
# This indicates a corrupt pickle, or the timezone database has been
# corrected violently enough to make this particular
# (utcoffset,dstoffset) no longer exist in the zone, or the
# abbreviation has been changed.
pass
# See if we can find an entry differing only by tzname. Abbreviations
# get changed from the initial guess by the database maintainers to
# match reality when this information is discovered.
for localized_tz in tz._tzinfos.values():
if (localized_tz._utcoffset == utcoffset
and localized_tz._dst == dstoffset):
return localized_tz
# This (utcoffset, dstoffset) information has been removed from the
# zone. Add it back. This might occur when the database maintainers have
# corrected incorrect information. datetime instances using this
# incorrect information will continue to do so, exactly as they were
# before being pickled. This is purely an overly paranoid safety net - I
    # doubt this will ever be needed in real life.
inf = (utcoffset, dstoffset, tzname)
tz._tzinfos[inf] = tz.__class__(inf, tz._tzinfos)
return tz._tzinfos[inf]
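# Usage sketch (added for illustration; not part of pytz itself): the custom
# __reduce__/unpickler pair above is what lets a pickled zone round-trip to
# the same singleton tzinfo instance, even across tz database updates.
#
#     import pickle
#     import pytz
#     eastern = pytz.timezone('US/Eastern')
#     assert pickle.loads(pickle.dumps(eastern)) is eastern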
|
|
import os
import warnings
from flask import Blueprint, current_app, request, g, send_from_directory
from flask.globals import _request_ctx_stack
from jinja2 import Environment, PackageLoader
from werkzeug.urls import url_quote_plus
from flask_debugtoolbar.compat import iteritems
from flask_debugtoolbar.toolbar import DebugToolbar
from flask_debugtoolbar.utils import decode_text
module = Blueprint('debugtoolbar', __name__)
def replace_insensitive(string, target, replacement):
"""Similar to string.replace() but is case insensitive
Code borrowed from:
http://forums.devshed.com/python-programming-11/case-insensitive-string-replace-490921.html
"""
no_case = string.lower()
index = no_case.rfind(target.lower())
if index >= 0:
return string[:index] + replacement + string[index + len(target):]
else: # no results so return the original string
return string
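def _replace_insensitive_example():
    """Illustration only (added; not part of the extension): the toolbar
    markup is spliced in before the last </body> tag, matched
    case-insensitively."""
    html = '<BODY>hello</BODY>'
    # -> '<BODY>helloTOOLBAR</body>'
    return replace_insensitive(html, '</body>', 'TOOLBAR</body>')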
def _printable(value):
try:
return decode_text(repr(value))
except Exception as e:
return '<repr(%s) raised %s: %s>' % (
object.__repr__(value), type(e).__name__, e)
class DebugToolbarExtension(object):
_static_dir = os.path.realpath(
os.path.join(os.path.dirname(__file__), 'static'))
_redirect_codes = [301, 302, 303, 304]
def __init__(self, app=None):
self.app = app
self.debug_toolbars = {}
# Configure jinja for the internal templates and add url rules
# for static data
self.jinja_env = Environment(
autoescape=True,
extensions=['jinja2.ext.i18n', 'jinja2.ext.with_'],
loader=PackageLoader(__name__, 'templates'))
self.jinja_env.filters['urlencode'] = url_quote_plus
self.jinja_env.filters['printable'] = _printable
if app is not None:
self.init_app(app)
def init_app(self, app):
for k, v in iteritems(self._default_config(app)):
app.config.setdefault(k, v)
if not app.config['DEBUG_TB_ENABLED']:
return
if not app.config.get('SECRET_KEY'):
raise RuntimeError(
"The Flask-DebugToolbar requires the 'SECRET_KEY' config "
"var to be set")
DebugToolbar.load_panels(app)
app.before_request(self.process_request)
app.after_request(self.process_response)
app.teardown_request(self.teardown_request)
# Monkey-patch the Flask.dispatch_request method
app.dispatch_request = self.dispatch_request
app.add_url_rule('/_debug_toolbar/static/<path:filename>',
'_debug_toolbar.static', self.send_static_file)
app.register_blueprint(module, url_prefix='/_debug_toolbar/views')
def _default_config(self, app):
return {
'DEBUG_TB_ENABLED': app.debug,
'DEBUG_TB_HOSTS': (),
'DEBUG_TB_INTERCEPT_REDIRECTS': True,
'DEBUG_TB_PANELS': (
'flask_debugtoolbar.panels.versions.VersionDebugPanel',
'flask_debugtoolbar.panels.timer.TimerDebugPanel',
'flask_debugtoolbar.panels.headers.HeaderDebugPanel',
'flask_debugtoolbar.panels.request_vars.RequestVarsDebugPanel',
'flask_debugtoolbar.panels.config_vars.ConfigVarsDebugPanel',
'flask_debugtoolbar.panels.template.TemplateDebugPanel',
'flask_debugtoolbar.panels.sqlalchemy.SQLAlchemyDebugPanel',
'flask_debugtoolbar.panels.logger.LoggingPanel',
'flask_debugtoolbar.panels.route_list.RouteListDebugPanel',
'flask_debugtoolbar.panels.profiler.ProfilerDebugPanel',
),
}
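    # Usage sketch (added for illustration; see the Flask-DebugToolbar docs
    # for the authoritative setup).  The configuration keys above can be
    # overridden on the Flask app before the extension is initialized:
    #
    #     from flask import Flask
    #     app = Flask(__name__)
    #     app.config['SECRET_KEY'] = 'change-me'
    #     app.debug = True            # DEBUG_TB_ENABLED defaults to app.debug
    #     app.config['DEBUG_TB_INTERCEPT_REDIRECTS'] = False
    #     toolbar = DebugToolbarExtension(app)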
def dispatch_request(self):
"""Modified version of Flask.dispatch_request to call process_view."""
req = _request_ctx_stack.top.request
app = current_app
if req.routing_exception is not None:
app.raise_routing_exception(req)
rule = req.url_rule
# if we provide automatic options for this URL and the
# request came with the OPTIONS method, reply automatically
if getattr(rule, 'provide_automatic_options', False) \
and req.method == 'OPTIONS':
return app.make_default_options_response()
# otherwise dispatch to the handler for that endpoint
view_func = app.view_functions[rule.endpoint]
view_func = self.process_view(app, view_func, req.view_args)
return view_func(**req.view_args)
def _show_toolbar(self):
"""Return a boolean to indicate if we need to show the toolbar."""
if request.blueprint == 'debugtoolbar':
return False
hosts = current_app.config['DEBUG_TB_HOSTS']
if hosts and request.remote_addr not in hosts:
return False
return True
def send_static_file(self, filename):
"""Send a static file from the flask-debugtoolbar static directory."""
return send_from_directory(self._static_dir, filename)
def process_request(self):
g.debug_toolbar = self
if not self._show_toolbar():
return
real_request = request._get_current_object()
self.debug_toolbars[real_request] = (
DebugToolbar(real_request, self.jinja_env))
for panel in self.debug_toolbars[real_request].panels:
panel.process_request(real_request)
def process_view(self, app, view_func, view_kwargs):
""" This method is called just before the flask view is called.
This is done by the dispatch_request method.
"""
real_request = request._get_current_object()
try:
toolbar = self.debug_toolbars[real_request]
except KeyError:
return view_func
for panel in toolbar.panels:
new_view = panel.process_view(real_request, view_func, view_kwargs)
if new_view:
view_func = new_view
return view_func
def process_response(self, response):
real_request = request._get_current_object()
if real_request not in self.debug_toolbars:
return response
# Intercept http redirect codes and display an html page with a
# link to the target.
if current_app.config['DEBUG_TB_INTERCEPT_REDIRECTS']:
if (response.status_code in self._redirect_codes and
not real_request.is_xhr):
redirect_to = response.location
redirect_code = response.status_code
if redirect_to:
content = self.render('redirect.html', {
'redirect_to': redirect_to,
'redirect_code': redirect_code
})
response.content_length = len(content)
response.location = None
response.response = [content]
response.status_code = 200
        # If the HTTP response code is 200, process the response to add
        # the toolbar to the returned HTML.
if not (response.status_code == 200 and
response.is_sequence and
response.headers['content-type'].startswith('text/html')):
return response
response_html = response.data.decode(response.charset)
no_case = response_html.lower()
body_end = no_case.rfind('</body>')
if body_end >= 0:
before = response_html[:body_end]
after = response_html[body_end:]
elif no_case.startswith('<!doctype html>'):
before = response_html
after = ''
else:
warnings.warn('Could not insert debug toolbar.'
' </body> tag not found in response.')
return response
toolbar = self.debug_toolbars[real_request]
for panel in toolbar.panels:
panel.process_response(real_request, response)
toolbar_html = toolbar.render_toolbar()
content = ''.join((before, toolbar_html, after))
content = content.encode(response.charset)
response.response = [content]
response.content_length = len(content)
return response
def teardown_request(self, exc):
self.debug_toolbars.pop(request._get_current_object(), None)
def render(self, template_name, context):
template = self.jinja_env.get_template(template_name)
return template.render(**context)
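# Editor's sketch (not part of the original module): minimal wiring of the
# extension, kept in an unused helper so importing this module has no side
# effects. The SECRET_KEY value and debug flag are illustrative only.
def _example_toolbar_setup():
    from flask import Flask
    app = Flask(__name__)
    app.debug = True                           # DEBUG_TB_ENABLED defaults to app.debug
    app.config['SECRET_KEY'] = 'dev-only-key'  # init_app() raises without a SECRET_KEY
    DebugToolbarExtension(app)                 # registers hooks, blueprint and static route
    return app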
|
|
"""
A script to implement a hypergeometric test procedure.
Author: David Angeles
Date: May 26, 2015
Requires Python > 3.5
Needs:
A tissue dictionary
A control list of gene names
An experimental list of gene names
"""
# -*- coding: utf-8 -*-
import pandas as pd
from scipy import stats
import numpy as np
import os
import sys
from urllib.request import urlopen
import contextlib
def pass_list(user_provided, tissue_dictionary):
"""
A function to check which genes in provided list are in the dictionary.
"""
ind = tissue_dictionary.wbid.isin(user_provided)
present = tissue_dictionary[ind].wbid
return present
def hgf(gene_list, tissue_df):
"""
Given a list, returns the p-value for each tissue tested.
    Given a gene list and a gene-tissue dictionary, returns a dictionary
    mapping each tissue to its enrichment p-value (one entry per tissue
    column in tissue_df). The p-values are not corrected for multiple
    hypothesis testing.
    gene_list should be a list or list-like
    tissue_df should be a pandas DataFrame
"""
# figure out what genes are in the user provided list
# wanted = pass_list(gene_list, tissue_df)
# re-index the dictionary s.t. the wbid is the index
tissue_df = tissue_df.set_index('wbid')
# number of balls per tissue in the dictionary
sums_of_tissues = tissue_df.sum()
# total labels in the dictionary
total_balls = tissue_df.sum().sum()
# slice out the rows from tissue_dictionary that came from the list
wanted_dictionary = tissue_df.reindex(gene_list)
# get the total number of labels from each tissue
wanted_sum = wanted_dictionary.sum()
# get the total number of balls provided by the user that are in dictionary
picked = wanted_sum.sum()
# make a hash with the p-values for enrichment of each tissue.
p_hash = {}
exp_hash = {}
for i, name in enumerate(tissue_df.columns.values):
# if the total number of genes is zero, return p= 1 for all tissues
if picked == 0:
p_hash[name] = 1
continue
# if a certain tissue has never been called, don't test it
if wanted_sum[name] == 0:
p_hash[name] = 1
continue
# no. of balls of color name picked
# total number of balls in urn
# total number of balls of color name in urn
# total number of balls picked out
n_obs = wanted_sum[name]
s_tissue = sums_of_tissues[name]
p_hash[name] = stats.hypergeom.sf(n_obs, total_balls, s_tissue,
picked)
exp_hash[name] = stats.hypergeom.mean(total_balls, s_tissue,
picked)
# return the p-values, the genes associated with each tissue and the user
# provided genes associate with each tissue.
return p_hash, exp_hash, wanted_dictionary
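def _hgf_example():
    """Editor's sketch (not part of the original script): the urn mapping used
    by hgf() with made-up numbers. With 1000 labels in the dictionary
    (total_balls), 50 of them for one tissue (s_tissue), 30 labels drawn from
    the user's genes (picked) and 5 of those hitting that tissue (n_obs):"""
    p = stats.hypergeom.sf(5, 1000, 50, 30)        # P(X > 5) under the null
    expected = stats.hypergeom.mean(1000, 50, 30)  # 30 * 50 / 1000 = 1.5
    return p, expected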
def benjamini_hochberg_stepup(p_vals):
"""
Given a list of p-values, apply FDR correction and return the q values.
"""
# sort the p_values, but keep the index listed
index = [i[0] for i in sorted(enumerate(p_vals), key=lambda x:x[1])]
# keep the p_values sorted
p_vals = sorted(p_vals)
q_vals = [None]*len(p_vals) # initialize an empty list
prev_q = 0
# BH Step Up begins here.
for i, p in enumerate(p_vals):
q = len(p_vals)/(i+1)*p # calculate the q_value for the current point
q = min(q, 1) # if q >1, make it == 1
q = max(q, prev_q) # preserve monotonicity
q_vals[i] = q # store the q_value
prev_q = q # update the previous q_value
    # prevent the lowest q value from going to zero
    q_vals = np.array(q_vals)
    if np.sum(q_vals == 0) > 0:
        # set the min q-value to 10x less than the smallest non-zero value
        q_vals[np.where(q_vals == 0)] = np.min(q_vals[np.where(q_vals != 0)])/10
# return q_vals and the index so we can match up each q-value to its index
return q_vals, index
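def _bh_example():
    """Editor's sketch (not part of the original script): a worked example of
    the step-up above. For p = [0.04, 0.01, 0.02] the sorted p-values are
    [0.01, 0.02, 0.04], giving q = [3/1*0.01, 3/2*0.02, 3/3*0.04] =
    [0.03, 0.03, 0.04] after enforcing monotonicity, with index = [1, 2, 0]
    mapping each q-value back to its original position."""
    return benjamini_hochberg_stepup([0.04, 0.01, 0.02])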
def return_enriched_tissues(p_hash, alpha):
"""A function index p-values and call the FDR function."""
# initialize a list, a hash and a counter
p_values = list(p_hash.values())
keys = list(p_hash.keys())
# FDR
q_values, index = benjamini_hochberg_stepup(p_values)
q_hash = {keys[index[pair[0]]]: pair[1] for pair in enumerate(q_values)}
return q_hash
def enrichment_analysis(gene_list, tissue_df, alpha=0.05, aname='', save=False,
show=False):
"""
Execute complete enrichment analysis (hypergeometric test, BH correction).
------
Params:
gene_list: a list of non-redundant WBIDs
tissue_df: as provided by WormBase (use fetch_dictionary)
alpha: significance threshold, defaults to 0.05
    aname: filename to use to save results
    save: whether to save the results to aname
    show: whether to print the results
-------
output:
df_final - the final dataframe containing enriched tissues
"""
if show:
print('Executing script\n')
# always make iterable
    if isinstance(gene_list, str):
gene_list = [gene_list]
if len(gene_list) == 0:
raise ValueError('gene_list is empty!')
# calculate the enrichment
p_hash, exp_hash, wanted_dic = hgf(gene_list, tissue_df)
# FDR correct
q_hash = return_enriched_tissues(p_hash, alpha)
# TODO: is there a better way to do this?
def get_observed(x):
"""A function to find the number of observations of a tissue x."""
return wanted_dic[x].sum()
# slight modification
# make a dataframe, index will be tissues column
df_final = pd.DataFrame.from_dict(exp_hash, orient='index')
# make the tissues their own column:
df_final.reset_index(level=0, inplace=True)
df_final.columns = ['Term', 'Expected']
df_final['Observed'] = df_final.Term.apply(get_observed) # v. slow
df_final['Enrichment Fold Change'] = df_final.Observed/df_final.Expected
df_final['P value'] = df_final.Term.map(p_hash)
df_final['Q value'] = df_final.Term.map(q_hash)
df_final.dropna(inplace=True)
df_final.Observed = df_final.Observed.astype(int)
df_final.sort_values('P value', inplace=True)
df_final = df_final[df_final['Q value'] < alpha]
if show:
if len(df_final) == 0:
print('Analysis returned no enriched tissues.')
else:
print(df_final) # print statement for raymond
if save:
df_final.to_csv(aname)
return df_final
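def _enrichment_example():
    """Editor's sketch (not part of the original script): minimal usage,
    assuming an internet connection for fetch_dictionary() and using
    placeholder WBIDs."""
    tissue_df = fetch_dictionary(analysis='tissue')
    genes = ['WBGene00000001', 'WBGene00000002']  # placeholder gene IDs
    return enrichment_analysis(genes, tissue_df, alpha=0.05, show=True)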
def plot_enrichment_results(df, y='logq', title='', analysis='tissue',
n_bars=15, save=False, **kwargs):
"""
A plot function for TEA.
    df: dataframe as output by enrichment_analysis
    y: one of 'Fold Change', 'Q value' or a user generated column
    title: title for the graph, also used as the file name when saving
    analysis: one of `tissue`, `phenotype` or `go`
    n_bars: number of bars to be shown, defaults to 15
    save: if True, saves the figure as '<title>.<ftype>'
    **kwargs: accepts 'ax' (a matplotlib axis) and 'ftype' (defaults to 'svg')
------
output:
ax - an axis object holding the graph that was generated
"""
import matplotlib.pyplot as plt
import seaborn as sns
# sns.choose_colorbrewer_palette('sequential', as_cmap=False)
if df.empty:
print('dataframe is empty!')
return
if analysis.lower() not in ['tissue', 'phenotype', 'go']:
raise ValueError('analysis variable must be one of' +
'`tissue`, `phenotype` or `go`')
analysis = analysis.lower()
ax = kwargs.pop('ax', None)
ftype = kwargs.pop('ftype', 'svg')
#
if ax is None:
fig, ax = plt.subplots(figsize=(14, 8))
# sort by q value change
df.sort_values(['Q value', 'Enrichment Fold Change'],
ascending=[True, False], inplace=True)
# make a logq bar:
logq = -df['Q value'].apply(np.log10)
# added August 26 2016:
tissue_ID = 11
pheno_ID = 19
go_ID = 10
if analysis == 'phenotype':
yvals = df.Term.str[:-pheno_ID-1]
elif analysis == 'tissue':
yvals = df.Term.str[:-tissue_ID-1]
elif analysis == 'go':
yvals = df.Term.str[:-go_ID-1]
# plot first n_bars
with sns.axes_style('whitegrid'):
ax = sns.barplot(x=logq[:n_bars], y=yvals[:n_bars], ax=ax)
# fix the plot to prettify it
ax.set_ylabel('Terms', fontsize=15)
if y.lower() != 'logq':
ax.set_xlabel(y, fontsize=15)
else:
        ax.set_xlabel(r'$-\log_{10}{q}$', fontsize=15)
ax.tick_params(axis='x', labelsize=13)
ax.tick_params(axis='y', labelsize=13)
ax.set_title(title, fontsize=20)
plt.tight_layout()
# save
if save:
plt.savefig('{0}.{1}'.format(title, ftype), dpi=1200)
return ax
def fetch_dictionary(analysis='tissue'):
"""
Fetch the dictionary we want.
If analysis isn't specified, fetches the tissue dictionary.
Params:
------
analysis - one of `tissue`, `phenotype` or `go`
Output:
data - a dataframe containing the dictionary of interest
"""
analysis = analysis.lower()
if analysis not in ['tissue', 'phenotype', 'go']:
raise ValueError('analysis must be one of `tissue`, `phenotype`' +
' or `go`')
url_tissue = 'http://caltech.wormbase.org/TissueEnrichmentAnalysis/'
if analysis == 'tissue':
url_tissue += 'anatomy_dict.csv'
elif analysis == 'phenotype':
url_tissue += 'phenotype_dict.csv'
elif analysis == 'go':
url_tissue += 'go_dict.csv'
try:
with contextlib.closing(urlopen(url_tissue)) as conn:
data = pd.read_csv(conn)
return data
    except Exception:
print('Cannot fetch dictionary. Please check internet connection.')
# ==============================================================================
# ==============================================================================
# ==============================================================================
# ==============================================================================
if __name__ == '__main__':
import re
import argparse
import matplotlib
matplotlib.use('Agg')
import seaborn as sns
sns.set_context('paper')
sns.set_style('whitegrid')
path = './'
os.chdir(path)
defQ = 0.1
    parser = argparse.ArgumentParser(description='Run EA.')
parser.add_argument("gene_list",
help='The full path to the gene list (WBIDs) you would\
like to analyse in .csv format')
parser.add_argument('title', help='Title for your analysis (shouldn\'t\
include file extension)',
type=str)
parser.add_argument('kind', help='What kind of analysis will be ' +
'performed. One of `tissue`, `phenotype` or `go`',
type=str)
parser.add_argument("-d", '--dictionary', nargs='?', help='Provide a\
dictionary to test. If none given, WormBase URL \
will be used to download the corresponding file')
parser.add_argument("-q", help='Qvalue threshold for significance. \
Default is {0} if not provided'.format(defQ),
type=float)
parser.add_argument('-p', '--print', help='Indicate whether you would like \
to print results', action='store_true')
parser.add_argument('-s', "--save", help='Indicate whether to save your \
plot.', action='store_true')
parser.add_argument('-b', "--background", help='Provide a background gene \
set as a csv file with a single column without a \
column name. Gene names must be in wbid format.',
type=str)
parser.add_argument('-m', "--melted_name", help='Name for gene_to_terms \
file. If none provided, defaults to gene_to_terms.csv',
type=str)
args = parser.parse_args()
gl_name = args.gene_list
title = args.title
kind = args.kind
# optional args
# load dictionary:
if args.dictionary:
        dict_name = args.dictionary
dictionary = pd.read_csv(dict_name)
else:
dictionary = fetch_dictionary(analysis=args.kind)
# reduce wbids to background set:
if args.background:
bg = pd.read_csv(args.background, header=None, names=['wbid'])
dictionary = dictionary[dictionary.wbid.isin(bg.wbid.values)]
# warn user if the dictionary is empty after subsetting:
if len(dictionary) == 0:
raise ValueError('Dictionary is empty after subsetting')
# set threshold
if args.q:
q = args.q
else:
q = defQ
# print results
if args.print:
prnt = True
else:
prnt = False
# save results
if args.save:
save = True
else:
save = False
# open gene list:
gene_list = pd.read_csv(gl_name, header=None, names=['wbid'])
# perform enrichment analysis:
df_results = enrichment_analysis(gene_list.wbid.unique(), dictionary,
alpha=q, show=False)
dfname = title + '.tsv'
df_results.to_csv(dfname, index=False, sep='\t')
# melt dictionary:
melted_dict = pd.melt(dictionary, id_vars='wbid', var_name='term',
value_name='found')
melted_dict = melted_dict[melted_dict.found == 1]
# keep only terms that were significant:
melted_dict = melted_dict[melted_dict.term.isin(df_results.Term.values)]
# keep only relevant genes:
melted_dict = melted_dict[melted_dict.wbid.isin(gene_list.wbid)]
# save to file:
if args.melted_name:
melted_dict.to_csv(args.melted_name, index=False)
else:
melted_dict.to_csv('gene_to_terms.csv', index=False)
if prnt:
with open(dfname, 'r') as f:
printer = f.readlines()
for value in printer:
value = value.split('\t')
for val in value:
if re.findall("\d+\.\d+", val):
ind = value.index(val)
x = float(val)
value[ind] = '{0:.2g}'.format(x)
value = '\t'.join(value)
print(value)
if save:
plot_enrichment_results(df_results, title=title, save=save,
analysis=args.kind)
sys.exit()
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2019, Andrew Klychkov (@Andersson007) <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = r'''
---
module: postgresql_table
short_description: Create, drop, or modify a PostgreSQL table
description:
- Allows you to create, drop, rename, or truncate a table, or change some table attributes
U(https://www.postgresql.org/docs/current/sql-createtable.html).
version_added: '2.8'
options:
table:
description:
- Table name.
required: true
aliases:
- name
type: str
state:
description:
- The table state. I(state=absent) is mutually exclusive with I(tablespace), I(owner), I(unlogged),
      I(like), I(including), I(columns), I(truncate), I(storage_params), and I(rename).
type: str
default: present
choices: [ absent, present ]
tablespace:
description:
- Set a tablespace for the table.
required: false
type: str
owner:
description:
- Set a table owner.
type: str
unlogged:
description:
- Create an unlogged table.
type: bool
default: no
like:
description:
- Create a table like another table (with similar DDL).
Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
including:
description:
- Keywords that are used with like parameter, may be DEFAULTS, CONSTRAINTS, INDEXES, STORAGE, COMMENTS or ALL.
Needs I(like) specified. Mutually exclusive with I(columns), I(rename), and I(truncate).
type: str
columns:
description:
- Columns that are needed.
type: list
rename:
description:
- New table name. Mutually exclusive with I(tablespace), I(owner),
I(unlogged), I(like), I(including), I(columns), I(truncate), and I(storage_params).
type: str
truncate:
description:
- Truncate a table. Mutually exclusive with I(tablespace), I(owner), I(unlogged),
I(like), I(including), I(columns), I(rename), and I(storage_params).
type: bool
default: no
storage_params:
description:
    - Storage parameters like fillfactor, autovacuum_vacuum_threshold, etc.
Mutually exclusive with I(rename) and I(truncate).
type: list
db:
description:
- Name of database to connect and where the table will be created.
type: str
aliases:
- login_db
session_role:
description:
- Switch to session_role after connecting.
The specified session_role must be a role that the current login_user is a member of.
- Permissions checking for SQL commands is carried out as though
the session_role were the one that had logged in originally.
type: str
notes:
- If you do not pass the db parameter, tables will be created in the database
  named postgres.
- PostgreSQL allows creating columnless tables, so the columns param is optional.
- The default authentication assumes that you are either logging in as or
sudo'ing to the postgres account on the host.
- To avoid "Peer authentication failed for user postgres" error,
use postgres user as a I(become_user).
- Unlogged tables are available from PostgreSQL server version 9.1
U(https://www.postgresql.org/docs/9.1/sql-createtable.html).
- This module uses psycopg2, a Python PostgreSQL database adapter. You must
ensure that psycopg2 is installed on the host before using this module.
- If the remote host is the PostgreSQL server (which is the default case), then
PostgreSQL must also be installed on the remote host. For Ubuntu-based
systems, install the postgresql, libpq-dev, and python-psycopg2 packages
on the remote host before using this module.
requirements: [ psycopg2 ]
author:
- Andrew Klychkov (@Andersson007)
extends_documentation_fragment: postgres
'''
EXAMPLES = r'''
- name: Create tbl2 in the acme database with the DDL like tbl1 with testuser as an owner
postgresql_table:
db: acme
name: tbl2
like: tbl1
owner: testuser
- name: Create tbl2 in the acme database and tablespace ssd with the DDL like tbl1 including comments and indexes
postgresql_table:
db: acme
table: tbl2
like: tbl1
including: comments, indexes
tablespace: ssd
- name: Create test_table with several columns in ssd tablespace with fillfactor=10 and autovacuum_analyze_threshold=1
postgresql_table:
name: test_table
columns:
- id bigserial primary key
- num bigint
- stories text
tablespace: ssd
storage_params:
- fillfactor=10
- autovacuum_analyze_threshold=1
- name: Create an unlogged table
postgresql_table:
name: useless_data
columns: waste_id int
unlogged: true
- name: Rename table foo to bar
postgresql_table:
table: foo
rename: bar
- name: Set owner to someuser
postgresql_table:
name: foo
owner: someuser
- name: Change tablespace of foo table to new_tablespace and set owner to new_user
postgresql_table:
name: foo
tablespace: new_tablespace
owner: new_user
- name: Truncate table foo
postgresql_table:
name: foo
truncate: yes
- name: Drop table foo
postgresql_table:
name: foo
state: absent
'''
RETURN = r'''
table:
description: Name of a table.
returned: always
type: str
sample: 'foo'
state:
description: Table state.
returned: always
type: str
sample: 'present'
owner:
description: Table owner.
returned: always
type: str
sample: 'postgres'
tablespace:
description: Tablespace.
returned: always
type: str
sample: 'ssd_tablespace'
queries:
description: List of executed queries.
returned: always
  type: list
sample: [ 'CREATE TABLE "test_table" (id bigint)' ]
storage_params:
description: Storage parameters.
returned: always
type: list
sample: [ "fillfactor=100", "autovacuum_analyze_threshold=1" ]
'''
try:
    import psycopg2
    import psycopg2.extras  # needed for the DictCursor used in main()
    HAS_PSYCOPG2 = True
except ImportError:
    HAS_PSYCOPG2 = False
from ansible.module_utils.basic import AnsibleModule, missing_required_lib
from ansible.module_utils.database import SQLParseError, pg_quote_identifier
from ansible.module_utils.postgres import postgres_common_argument_spec
from ansible.module_utils._text import to_native
from ansible.module_utils.six import iteritems
# ===========================================
# PostgreSQL module specific support methods.
#
class Table(object):
def __init__(self, name, module, cursor):
self.name = name
self.module = module
self.cursor = cursor
self.info = {
'owner': '',
'tblspace': '',
'storage_params': [],
}
self.exists = False
self.__exists_in_db()
self.executed_queries = []
def get_info(self):
"""Getter to refresh and get table info"""
self.__exists_in_db()
def __exists_in_db(self):
"""Check table exists and refresh info"""
query = ("SELECT t.tableowner, t.tablespace, c.reloptions "
"FROM pg_tables AS t "
"INNER JOIN pg_class AS c ON c.relname = t.tablename "
"INNER JOIN pg_namespace AS n ON c.relnamespace = n.oid "
"WHERE t.tablename = '%s' "
"AND n.nspname = 'public'" % self.name)
res = self.__exec_sql(query)
if res:
self.exists = True
self.info = dict(
owner=res[0][0],
tblspace=res[0][1] if res[0][1] else '',
storage_params=res[0][2] if res[0][2] else [],
)
return True
else:
self.exists = False
return False
def create(self, columns='', params='', tblspace='',
unlogged=False, owner=''):
"""
Create table.
If table exists, check passed args (params, tblspace, owner) and,
if they're different from current, change them.
Arguments:
params - storage params (passed by "WITH (...)" in SQL),
comma separated.
tblspace - tablespace.
owner - table owner.
unlogged - create unlogged table.
columns - column string (comma separated).
"""
name = pg_quote_identifier(self.name, 'table')
changed = False
if self.exists:
            if tblspace == 'pg_default' and not self.info['tblspace']:
pass # Because they have the same meaning
elif tblspace and self.info['tblspace'] != tblspace:
self.set_tblspace(tblspace)
changed = True
if owner and self.info['owner'] != owner:
self.set_owner(owner)
changed = True
if params:
param_list = [p.strip(' ') for p in params.split(',')]
new_param = False
for p in param_list:
if p not in self.info['storage_params']:
new_param = True
if new_param:
self.set_stor_params(params)
changed = True
if changed:
return True
return False
query = "CREATE"
if unlogged:
query += " UNLOGGED TABLE %s" % name
else:
query += " TABLE %s" % name
if columns:
query += " (%s)" % columns
else:
query += " ()"
if params:
query += " WITH (%s)" % params
if tblspace:
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
if self.__exec_sql(query, ddl=True):
self.executed_queries.append(query)
changed = True
if owner:
changed = self.set_owner(owner)
return changed
def create_like(self, src_table, including='', tblspace='',
unlogged=False, params='', owner=''):
"""
Create table like another table (with similar DDL).
Arguments:
src_table - source table.
including - corresponds to optional INCLUDING expression
in CREATE TABLE ... LIKE statement.
params - storage params (passed by "WITH (...)" in SQL),
comma separated.
tblspace - tablespace.
owner - table owner.
unlogged - create unlogged table.
"""
changed = False
name = pg_quote_identifier(self.name, 'table')
query = "CREATE"
if unlogged:
query += " UNLOGGED TABLE %s" % name
else:
query += " TABLE %s" % name
query += " (LIKE %s" % pg_quote_identifier(src_table, 'table')
if including:
including = including.split(',')
for i in including:
query += " INCLUDING %s" % i
query += ')'
if params:
query += " WITH (%s)" % params
if tblspace:
query += " TABLESPACE %s" % pg_quote_identifier(tblspace, 'database')
if self.__exec_sql(query, ddl=True):
self.executed_queries.append(query)
changed = True
if owner:
changed = self.set_owner(owner)
return changed
def truncate(self):
query = "TRUNCATE TABLE %s" % pg_quote_identifier(self.name, 'table')
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def rename(self, newname):
query = "ALTER TABLE %s RENAME TO %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(newname, 'table'))
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def set_owner(self, username):
query = "ALTER TABLE %s OWNER TO %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(username, 'role'))
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def drop(self):
query = "DROP TABLE %s" % pg_quote_identifier(self.name, 'table')
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def set_tblspace(self, tblspace):
query = "ALTER TABLE %s SET TABLESPACE %s" % (pg_quote_identifier(self.name, 'table'),
pg_quote_identifier(tblspace, 'database'))
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def set_stor_params(self, params):
query = "ALTER TABLE %s SET (%s)" % (pg_quote_identifier(self.name, 'table'), params)
self.executed_queries.append(query)
return self.__exec_sql(query, ddl=True)
def __exec_sql(self, query, ddl=False):
try:
self.cursor.execute(query)
if not ddl:
res = self.cursor.fetchall()
return res
return True
except SQLParseError as e:
self.module.fail_json(msg=to_native(e))
except psycopg2.ProgrammingError as e:
self.module.fail_json(msg="Cannot execute SQL '%s': %s" % (query, to_native(e)))
return False
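# Editor's note (not part of the original module): for columns
# "id bigserial primary key, num bigint", storage_params "fillfactor=10" and
# tablespace "ssd", Table.create() above builds roughly:
#   CREATE TABLE "test_table" (id bigserial primary key, num bigint)
#     WITH (fillfactor=10) TABLESPACE "ssd"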
# ===========================================
# Module execution.
#
def main():
argument_spec = postgres_common_argument_spec()
argument_spec.update(
table=dict(type='str', required=True, aliases=['name']),
state=dict(type='str', default="present", choices=["absent", "present"]),
db=dict(type='str', default='', aliases=['login_db']),
port=dict(type='int', default=5432, aliases=['login_port']),
ssl_mode=dict(type='str', default='prefer', choices=['allow', 'disable', 'prefer', 'require', 'verify-ca', 'verify-full']),
ca_cert=dict(type='str', aliases=['ssl_rootcert']),
tablespace=dict(type='str'),
owner=dict(type='str'),
unlogged=dict(type='bool'),
like=dict(type='str'),
including=dict(type='str'),
rename=dict(type='str'),
truncate=dict(type='bool'),
columns=dict(type='list'),
storage_params=dict(type='list'),
session_role=dict(type='str'),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
)
table = module.params["table"]
state = module.params["state"]
tablespace = module.params["tablespace"]
owner = module.params["owner"]
unlogged = module.params["unlogged"]
like = module.params["like"]
including = module.params["including"]
newname = module.params["rename"]
storage_params = module.params["storage_params"]
truncate = module.params["truncate"]
columns = module.params["columns"]
sslrootcert = module.params["ca_cert"]
session_role = module.params["session_role"]
# Check mutual exclusive parameters:
if state == 'absent' and (truncate or newname or columns or tablespace or
like or storage_params or unlogged or
owner or including):
module.fail_json(msg="%s: state=absent is mutually exclusive with: "
"truncate, rename, columns, tablespace, "
"including, like, storage_params, unlogged, owner" % table)
if truncate and (newname or columns or like or unlogged or
storage_params or owner or tablespace or including):
module.fail_json(msg="%s: truncate is mutually exclusive with: "
"rename, columns, like, unlogged, including, "
"storage_params, owner, tablespace" % table)
if newname and (columns or like or unlogged or
storage_params or owner or tablespace or including):
module.fail_json(msg="%s: rename is mutually exclusive with: "
"columns, like, unlogged, including, "
"storage_params, owner, tablespace" % table)
if like and columns:
module.fail_json(msg="%s: like and columns params are mutually exclusive" % table)
if including and not like:
module.fail_json(msg="%s: including param needs like param specified" % table)
# To use defaults values, keyword arguments must be absent, so
# check which values are empty and don't include in the **kw
# dictionary
params_map = {
"login_host": "host",
"login_user": "user",
"login_password": "password",
"port": "port",
"db": "database",
"ssl_mode": "sslmode",
"ca_cert": "sslrootcert"
}
kw = dict((params_map[k], v) for (k, v) in iteritems(module.params)
if k in params_map and v != "" and v is not None)
if not HAS_PSYCOPG2:
module.fail_json(msg=missing_required_lib("psycopg2"))
# If a login_unix_socket is specified, incorporate it here.
is_localhost = "host" not in kw or kw["host"] is None or kw["host"] == "localhost"
if is_localhost and module.params["login_unix_socket"] != "":
kw["host"] = module.params["login_unix_socket"]
if psycopg2.__version__ < '2.4.3' and sslrootcert is not None:
        module.fail_json(msg='psycopg2 must be at least 2.4.3 in order to use the ca_cert parameter')
try:
db_connection = psycopg2.connect(**kw)
cursor = db_connection.cursor(cursor_factory=psycopg2.extras.DictCursor)
except TypeError as e:
if 'sslrootcert' in e.args[0]:
module.fail_json(msg='Postgresql server must be at least version 8.4 to support sslrootcert')
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
except Exception as e:
module.fail_json(msg="unable to connect to database: %s" % to_native(e))
if session_role:
try:
cursor.execute('SET ROLE %s' % session_role)
except Exception as e:
module.fail_json(msg="Could not switch role: %s" % to_native(e))
if storage_params:
storage_params = ','.join(storage_params)
if columns:
columns = ','.join(columns)
##############
# Do main job:
table_obj = Table(table, module, cursor)
# Set default returned values:
changed = False
kw['table'] = table
kw['state'] = ''
if table_obj.exists:
kw = dict(
table=table,
state='present',
owner=table_obj.info['owner'],
tablespace=table_obj.info['tblspace'],
storage_params=table_obj.info['storage_params'],
)
if state == 'absent':
changed = table_obj.drop()
elif truncate:
changed = table_obj.truncate()
elif newname:
changed = table_obj.rename(newname)
q = table_obj.executed_queries
table_obj = Table(newname, module, cursor)
table_obj.executed_queries = q
elif state == 'present' and not like:
changed = table_obj.create(columns, storage_params,
tablespace, unlogged, owner)
elif state == 'present' and like:
changed = table_obj.create_like(like, including, tablespace,
unlogged, storage_params)
if changed:
if module.check_mode:
db_connection.rollback()
else:
db_connection.commit()
# Refresh table info for RETURN.
# Note, if table has been renamed, it gets info by newname:
table_obj.get_info()
db_connection.commit()
if table_obj.exists:
kw = dict(
table=table,
state='present',
owner=table_obj.info['owner'],
tablespace=table_obj.info['tblspace'],
storage_params=table_obj.info['storage_params'],
)
else:
# We just change the table state here
# to keep other information about the dropped table:
kw['state'] = 'absent'
kw['queries'] = table_obj.executed_queries
kw['changed'] = changed
db_connection.close()
module.exit_json(**kw)
if __name__ == '__main__':
main()
|
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import logbook
import math
import numpy as np
import numpy.linalg as la
from zipline.finance import trading
import pandas as pd
from . import risk
from . risk import (
alpha,
check_entry,
information_ratio,
sharpe_ratio,
sortino_ratio,
)
log = logbook.Logger('Risk Period')
choose_treasury = functools.partial(risk.choose_treasury,
risk.select_treasury_duration)
class RiskMetricsPeriod(object):
def __init__(self, start_date, end_date, returns,
benchmark_returns=None):
treasury_curves = trading.environment.treasury_curves
if treasury_curves.index[-1] >= start_date:
mask = ((treasury_curves.index >= start_date) &
(treasury_curves.index <= end_date))
self.treasury_curves = treasury_curves[mask]
else:
# our test is beyond the treasury curve history
# so we'll use the last available treasury curve
self.treasury_curves = treasury_curves[-1:]
self.start_date = start_date
self.end_date = end_date
if benchmark_returns is None:
br = trading.environment.benchmark_returns
benchmark_returns = br[(br.index >= returns.index[0]) &
(br.index <= returns.index[-1])]
self.algorithm_returns = self.mask_returns_to_period(returns)
self.benchmark_returns = self.mask_returns_to_period(benchmark_returns)
self.calculate_metrics()
def calculate_metrics(self):
self.benchmark_period_returns = \
self.calculate_period_returns(self.benchmark_returns)
self.algorithm_period_returns = \
self.calculate_period_returns(self.algorithm_returns)
if not self.algorithm_returns.index.equals(
self.benchmark_returns.index
):
message = "Mismatch between benchmark_returns ({bm_count}) and \
algorithm_returns ({algo_count}) in range {start} : {end}"
message = message.format(
bm_count=len(self.benchmark_returns),
algo_count=len(self.algorithm_returns),
start=self.start_date,
end=self.end_date
)
raise Exception(message)
self.num_trading_days = len(self.benchmark_returns)
self.benchmark_volatility = self.calculate_volatility(
self.benchmark_returns)
self.algorithm_volatility = self.calculate_volatility(
self.algorithm_returns)
self.treasury_period_return = choose_treasury(
self.treasury_curves,
self.start_date,
self.end_date
)
self.sharpe = self.calculate_sharpe()
self.sortino = self.calculate_sortino()
self.information = self.calculate_information()
self.beta, self.algorithm_covariance, self.benchmark_variance, \
self.condition_number, self.eigen_values = self.calculate_beta()
self.alpha = self.calculate_alpha()
self.excess_return = self.algorithm_period_returns - \
self.treasury_period_return
self.max_drawdown = self.calculate_max_drawdown()
def to_dict(self):
"""
Creates a dictionary representing the state of the risk report.
        Returns a dict keyed by the metric names assembled in rval below.
"""
period_label = self.end_date.strftime("%Y-%m")
rval = {
'trading_days': self.num_trading_days,
'benchmark_volatility': self.benchmark_volatility,
'algo_volatility': self.algorithm_volatility,
'treasury_period_return': self.treasury_period_return,
'algorithm_period_return': self.algorithm_period_returns,
'benchmark_period_return': self.benchmark_period_returns,
'sharpe': self.sharpe,
'sortino': self.sortino,
'information': self.information,
'beta': self.beta,
'alpha': self.alpha,
'excess_return': self.excess_return,
'max_drawdown': self.max_drawdown,
'period_label': period_label
}
return {k: None if check_entry(k, v) else v
for k, v in rval.iteritems()}
def __repr__(self):
statements = []
metrics = [
"algorithm_period_returns",
"benchmark_period_returns",
"excess_return",
"num_trading_days",
"benchmark_volatility",
"algorithm_volatility",
"sharpe",
"sortino",
"information",
"algorithm_covariance",
"benchmark_variance",
"beta",
"alpha",
"max_drawdown",
"algorithm_returns",
"benchmark_returns",
"condition_number",
"eigen_values"
]
for metric in metrics:
value = getattr(self, metric)
statements.append("{m}:{v}".format(m=metric, v=value))
return '\n'.join(statements)
def mask_returns_to_period(self, daily_returns):
if isinstance(daily_returns, list):
returns = pd.Series([x.returns for x in daily_returns],
index=[x.date for x in daily_returns])
else: # otherwise we're receiving an index already
returns = daily_returns
trade_days = trading.environment.trading_days
trade_day_mask = returns.index.normalize().isin(trade_days)
mask = ((returns.index >= self.start_date) &
(returns.index <= self.end_date) & trade_day_mask)
returns = returns[mask]
return returns
def calculate_period_returns(self, returns):
period_returns = (1. + returns).prod() - 1
return period_returns
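    # Editor's note: for example, daily returns of +1%, -2% and +3% compound
    # to (1.01 * 0.98 * 1.03) - 1, roughly +1.95% for the period.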
def calculate_volatility(self, daily_returns):
return np.std(daily_returns, ddof=1) * math.sqrt(self.num_trading_days)
def calculate_sharpe(self):
"""
http://en.wikipedia.org/wiki/Sharpe_ratio
"""
return sharpe_ratio(self.algorithm_volatility,
self.algorithm_period_returns,
self.treasury_period_return)
def calculate_sortino(self, mar=None):
"""
http://en.wikipedia.org/wiki/Sortino_ratio
"""
if mar is None:
mar = self.treasury_period_return
return sortino_ratio(self.algorithm_returns,
self.algorithm_period_returns,
mar)
def calculate_information(self):
"""
http://en.wikipedia.org/wiki/Information_ratio
"""
return information_ratio(self.algorithm_returns,
self.benchmark_returns)
def calculate_beta(self):
"""
.. math::
\\beta_a = \\frac{\mathrm{Cov}(r_a,r_p)}{\mathrm{Var}(r_p)}
http://en.wikipedia.org/wiki/Beta_(finance)
"""
        # it doesn't make much sense to calculate beta for less than two days,
        # so return zeroed-out statistics instead.
if len(self.algorithm_returns) < 2:
return 0.0, 0.0, 0.0, 0.0, []
returns_matrix = np.vstack([self.algorithm_returns,
self.benchmark_returns])
C = np.cov(returns_matrix, ddof=1)
eigen_values = la.eigvals(C)
condition_number = max(eigen_values) / min(eigen_values)
algorithm_covariance = C[0][1]
benchmark_variance = C[1][1]
beta = algorithm_covariance / benchmark_variance
return (
beta,
algorithm_covariance,
benchmark_variance,
condition_number,
eigen_values
)
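    # Editor's note: a tiny example -- for algorithm returns [0.01, 0.03] and
    # benchmark returns [0.02, 0.04], np.cov(..., ddof=1) gives
    # Cov = 0.0002 and Var(benchmark) = 0.0002, so beta = 1.0.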
def calculate_alpha(self):
"""
http://en.wikipedia.org/wiki/Alpha_(investment)
"""
return alpha(self.algorithm_period_returns,
self.treasury_period_return,
self.benchmark_period_returns,
self.beta)
def calculate_max_drawdown(self):
compounded_returns = []
cur_return = 0.0
for r in self.algorithm_returns:
try:
cur_return += math.log(1.0 + r)
# this is a guard for a single day returning -100%
except ValueError:
log.debug("{cur} return, zeroing the returns".format(
cur=cur_return))
cur_return = 0.0
# BUG? Shouldn't this be set to log(1.0 + 0) ?
compounded_returns.append(cur_return)
cur_max = None
max_drawdown = None
for cur in compounded_returns:
if cur_max is None or cur > cur_max:
cur_max = cur
drawdown = (cur - cur_max)
if max_drawdown is None or drawdown < max_drawdown:
max_drawdown = drawdown
if max_drawdown is None:
return 0.0
return 1.0 - math.exp(max_drawdown)
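    # Editor's note: a worked example -- for returns [0.10, -0.20, 0.05] the
    # compounded log returns are [0.0953, -0.1278, -0.0790]; the largest drop
    # from the running peak is log(0.8) = -0.2231, so the reported max
    # drawdown is 1 - exp(-0.2231) = 0.20, a 20% peak-to-trough loss.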
|
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main logic for training the A2N model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gc
import math
import os
from absl import app
from absl import flags
from absl import logging
import clueweb_text_graph
import dataset
import graph
import losses
import metrics
import models
import numpy as np
import slim
from tensorboard.plugins import projector
import tensorflow as tf
from tensorflow.python.training.summary_io import SummaryWriterCache
import text_graph
import utils
FLAGS = flags.FLAGS
flags.DEFINE_string("kg_file", None, "path to kg file")
flags.DEFINE_string("output_dir", None, "output dir for summaries/logs")
flags.DEFINE_string("dev_kg_file", None, "path to dev kg file")
flags.DEFINE_string("test_kg_file", None, "path to test kg file")
flags.DEFINE_string("model_path", None, "path to model if testing only")
flags.DEFINE_boolean("evaluate", False, "run eval loop")
flags.DEFINE_boolean("test_only", False, "if test only")
flags.DEFINE_integer("global_step", None,
"global_step to restore model for testing")
flags.DEFINE_integer("num_epochs", 5, "number of train epochs")
flags.DEFINE_integer("batchsize", 64, "batchsize for training")
flags.DEFINE_integer("test_batchsize", 10, "batchsize for testing")
flags.DEFINE_integer("max_neighbors", None,
"maximum neighbors to use during training")
flags.DEFINE_integer("max_negatives", None,
"maximum number of negative entities to sample")
flags.DEFINE_integer("emb_dim", 100,
"dimension of entity and relation embeddings")
flags.DEFINE_float("entity_encoder_dropout", 1.0,
"dropout for entity embeddings")
flags.DEFINE_float("relation_encoder_dropout", 1.0,
"dropout for relation embeddings")
flags.DEFINE_float("init_entity_encoder_dropout", 1.0,
"dropout for init entity embeddings in attention")
flags.DEFINE_float("attention_encoder_dropout", 1.0,
"dropout for attention encoder")
flags.DEFINE_boolean("use_separate_attention_emb", False,
"use separate entity embeddings for computing attention")
flags.DEFINE_integer("num_parallel_preprocess", 64,
"number of processes to use in dataset preprocessing")
flags.DEFINE_integer("prefetch_examples", 10, "number of examples to prefetch")
flags.DEFINE_integer("shuffle_buffer", 50000,
"buffer for shuffling training examples")
flags.DEFINE_float("learning_rate", 0.001, "learning for optimizer")
flags.DEFINE_float("grad_clip", None, "Clip gradient norm during training")
flags.DEFINE_integer("save_every", 100, "save model every this many steps")
flags.DEFINE_string("entity_names_file", None,
"mapping of Freebase mid to names")
flags.DEFINE_enum("model", "attention",
["distmult", "attention", "source_attention",
"source_rel_attention", "source_path_attention"],
"the model to use")
flags.DEFINE_bool("use_tanh", False, "use tanh non-linearity on embeddings")
flags.DEFINE_enum("attention_type", "bilinear",
["bilinear", "cosine", "sigmoid_bilinear",
"sigmoid_avg_bilinear", "relation"],
"type of attention to use for attention model")
flags.DEFINE_bool("analyze", False, "analyze model")
flags.DEFINE_integer("max_path_length", None,
"maximum path length for path attention models")
flags.DEFINE_string("text_kg_file", None, "path to text data")
flags.DEFINE_integer("max_text_len", None, "max length of text")
flags.DEFINE_integer("max_vocab_size", None, "max number of text words")
flags.DEFINE_integer("min_word_freq", None, "min freq threshold for text words")
flags.DEFINE_integer("max_text_neighbors", None, "max text neighbors")
flags.DEFINE_float("text_encoder_dropout", 1.0, "dropout for text cnn")
flags.DEFINE_list("text_encoder_filter_widths", ["3", "5", "7"],
"filter widths for cnn")
flags.DEFINE_enum("text_encoder_nonlinearity", "tanh", ["relu", "tanh"],
"non-linearity to use for TextCNN")
flags.DEFINE_integer("text_encoder_num_filters", 64, "num filters for cnn")
flags.DEFINE_string("clueweb_sentences", None,
"path to clueweb sentences (or data formatted like cw)")
flags.DEFINE_string("clueweb_data", None,
"path to clueweb data (or data formatted like cw)")
flags.DEFINE_string("clueweb_embeddings", None,
"path to clueweb embeddings (or data formatted like cw)")
flags.DEFINE_integer("text_emb_dim", None, "embedding dim for clueweb text")
flags.DEFINE_integer("subsample_text_rels", None,
"subsample text to max this many per pair")
flags.DEFINE_string("master", "local",
"""BNS name of the TensorFlow master to use.""")
flags.DEFINE_integer("task", 0,
"""Task id of the replica running the training.""")
flags.DEFINE_integer("ps_tasks", 0, """Number of tasks in the ps job.
If 0 no ps job is used.""")
flags.mark_flag_as_required("kg_file")
flags.mark_flag_as_required("output_dir")
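# Editor's sketch (not part of the original): an illustrative invocation with
# placeholder paths; the script file name is assumed.
#   python <this_script>.py --kg_file=/path/to/train.kg --output_dir=/tmp/a2n \
#       --model=source_rel_attention --num_epochs=5 --batchsize=64 \
#       --max_neighbors=100 --max_negatives=100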
def add_embedding_to_projector(projector_config, emb_name, emb_metadata_path):
embedding_conf = projector_config.embeddings.add()
embedding_conf.tensor_name = emb_name
embedding_conf.metadata_path = emb_metadata_path
def get_train_op(loss, optimizer, grad_clip=None, global_step=None):
"""Make a train_op apply gradients to loss using optimizer.
Args:
loss: the loss function to optimize
optimizer: the optimizer to compute and apply gradients
    grad_clip: clip gradient norms by the value supplied (default: don't clip)
global_step: tf.placeholder for global_step
Returns:
train_op: the training op to run
grads_and_vars: the gradients and variables for debugging
var_names: the variable names for debugging
capped_grads_and_vars: for debugging
"""
variables = tf.trainable_variables()
grads_and_vars = optimizer.compute_gradients(loss, variables)
var_names = [v.name for v in variables]
logging.info("Trainable variables:")
for var in var_names:
logging.info("\t %s", var)
logging.debug(grads_and_vars)
grad_var_norms = [(tf.global_norm([gv[1]]), tf.global_norm([gv[0]]))
for gv in grads_and_vars]
if grad_clip:
capped_grads_and_vars = [(tf.clip_by_norm(gv[0], grad_clip), gv[1])
for gv in grads_and_vars]
else:
capped_grads_and_vars = grads_and_vars
# norms of gradients for debugging
# grad_norms = [tf.sqrt(tf.reduce_sum(tf.square(grad)))
# for grad, _ in grads_and_vars]
train_op = optimizer.apply_gradients(capped_grads_and_vars,
global_step=global_step)
return train_op, grad_var_norms, var_names, capped_grads_and_vars
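# Editor's sketch (not part of the original): typical use of get_train_op,
# assuming `loss` and `global_step` tensors already exist in the graph.
#   optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
#   train_op, grad_var_norms, var_names, _ = get_train_op(
#       loss, optimizer, grad_clip=FLAGS.grad_clip, global_step=global_step)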
def read_graph_data(
kg_file, add_reverse_graph, add_inverse_edge, mode,
num_epochs, batchsize, max_neighbors, max_negatives,
train_graph=None, text_kg_file=None, val_graph=None
):
"""Read graph, create dataset and build model."""
# Read graphs and create datasets
entity_vocab = relation_vocab = None
if train_graph:
entity_vocab = train_graph.entity_vocab
relation_vocab = train_graph.relation_vocab
if FLAGS.clueweb_data and mode == "train":
graph_type = clueweb_text_graph.CWTextGraph
text_kg_file = FLAGS.clueweb_data
elif text_kg_file and mode == "train":
graph_type = text_graph.TextGraph
text_kg_file = FLAGS.text_kg_file
else:
graph_type = graph.Graph
text_kg_file = None
k_graph = graph_type(
text_kg_file=text_kg_file,
skip_new=True,
max_text_len=FLAGS.max_text_len,
max_vocab_size=FLAGS.max_vocab_size,
min_word_freq=FLAGS.min_word_freq,
kg_file=kg_file,
add_reverse_graph=add_reverse_graph,
add_inverse_edge=add_inverse_edge, mode=mode,
entity_vocab=entity_vocab, relation_vocab=relation_vocab,
max_path_length=FLAGS.max_path_length if mode == "train" else None,
embeddings_file=FLAGS.clueweb_embeddings,
sentence_vocab_file=FLAGS.clueweb_sentences,
subsample=FLAGS.subsample_text_rels
)
if FLAGS.text_kg_file:
max_text_len = FLAGS.max_text_len
if mode == "train":
max_text_len = max_text_len or k_graph.max_text_len
elif train_graph:
max_text_len = max_text_len or train_graph.max_text_len
else:
max_text_len = None
k_data = dataset.Dataset(data_graph=k_graph, train_graph=train_graph,
mode=mode, num_epochs=num_epochs,
batchsize=batchsize,
max_neighbors=max_neighbors,
max_negatives=max_negatives,
model_type=FLAGS.model,
max_text_len=max_text_len,
max_text_neighbors=FLAGS.max_text_neighbors,
val_graph=val_graph)
# Create the training data iterator and return the input tensors
# with tf.device("/job:worker"):
k_data.create_dataset_iterator(
num_parallel=FLAGS.num_parallel_preprocess,
prefetch=FLAGS.prefetch_examples,
shuffle_buffer=FLAGS.shuffle_buffer
# , device="worker" if FLAGS.master != "local" else "cpu"
)
return k_graph, k_data
def create_model(train_graph, iterator):
"""Create model and placeholders."""
if FLAGS.clueweb_data:
s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels, text_nbrs_s_emb = iterator.get_next()
elif FLAGS.text_kg_file:
s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates, labels = \
iterator.get_next()
else:
s, nbrs_s, r, candidates, nbrs_candidates, labels = iterator.get_next()
# Create the attention model, this returns candidates scores and the model
# encoders in a dict for creating feed_dict for all encoders
is_train_ph = tf.placeholder_with_default(True, shape=[],
name="is_train_ph")
if FLAGS.model == "attention":
with tf.variable_scope("attention_model", reuse=False):
candidate_scores, model = models.attention_kbc_model(
FLAGS, train_graph, is_train_ph,
(s, nbrs_s, r, candidates, nbrs_candidates)
)
elif FLAGS.model == "source_attention":
with tf.variable_scope("s_attention_model", reuse=False):
candidate_scores, model = models.source_attention_kbc_model(
FLAGS, train_graph, is_train_ph,
(s, nbrs_s, r, candidates)
)
elif FLAGS.model in ["source_rel_attention", "source_path_attention"]:
if FLAGS.clueweb_data:
input_tensors = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates)
elif FLAGS.text_kg_file:
input_tensors = (s, nbrs_s, text_nbrs_s, r, candidates)
else:
input_tensors = (s, nbrs_s, r, candidates)
with tf.variable_scope("s_attention_model", reuse=False):
candidate_scores, model = models.source_attention_kbc_model(
FLAGS, train_graph, is_train_ph,
input_tensors, model_type=FLAGS.model
)
elif FLAGS.model == "distmult":
with tf.variable_scope("distmult_model", reuse=False):
candidate_scores, model = models.distmult_kbc_model(
FLAGS, train_graph, is_train_ph,
(s, r, candidates)
)
if FLAGS.clueweb_data:
inputs = (s, nbrs_s, text_nbrs_s, text_nbrs_s_emb,
r, candidates, nbrs_candidates)
elif FLAGS.text_kg_file:
inputs = (s, nbrs_s, text_nbrs_s, r, candidates, nbrs_candidates)
else:
inputs = (s, nbrs_s, r, candidates, nbrs_candidates)
return candidate_scores, candidates, labels, model, is_train_ph, inputs
def evaluate():
"""Run evaluation on dev or test data."""
add_inverse_edge = FLAGS.model in \
["source_rel_attention", "source_path_attention"]
if FLAGS.clueweb_data:
train_graph = clueweb_text_graph.CWTextGraph(
text_kg_file=FLAGS.clueweb_data,
embeddings_file=FLAGS.clueweb_embeddings,
sentence_vocab_file=FLAGS.clueweb_sentences,
skip_new=True,
kg_file=FLAGS.kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
subsample=FLAGS.subsample_text_rels
)
elif FLAGS.text_kg_file:
train_graph = text_graph.TextGraph(
text_kg_file=FLAGS.text_kg_file,
skip_new=True,
max_text_len=FLAGS.max_text_len,
max_vocab_size=FLAGS.max_vocab_size,
min_word_freq=FLAGS.min_word_freq,
kg_file=FLAGS.kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
max_path_length=FLAGS.max_path_length
)
else:
train_graph = graph.Graph(
kg_file=FLAGS.kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
max_path_length=FLAGS.max_path_length
)
# train_graph, _ = read_graph_data(
# kg_file=FLAGS.kg_file,
# add_reverse_graph=(FLAGS.model != "source_rel_attention"),
# add_inverse_edge=(FLAGS.model == "source_rel_attention"),
# mode="train", num_epochs=FLAGS.num_epochs, batchsize=FLAGS.batchsize,
# max_neighbors=FLAGS.max_neighbors,
# max_negatives=FLAGS.max_negatives
# )
val_graph = None
if FLAGS.dev_kg_file:
val_graph, eval_data = read_graph_data(
kg_file=FLAGS.dev_kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
# add_reverse_graph=False,
# add_inverse_edge=False,
mode="dev", num_epochs=1, batchsize=FLAGS.test_batchsize,
max_neighbors=FLAGS.max_neighbors,
max_negatives=FLAGS.max_negatives, train_graph=train_graph,
text_kg_file=FLAGS.text_kg_file
)
if FLAGS.test_kg_file:
_, eval_data = read_graph_data(
kg_file=FLAGS.test_kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
# add_reverse_graph=False,
# add_inverse_edge=False,
mode="test", num_epochs=1, batchsize=FLAGS.test_batchsize,
max_neighbors=FLAGS.max_neighbors,
max_negatives=None, train_graph=train_graph,
text_kg_file=FLAGS.text_kg_file,
val_graph=val_graph
)
if not FLAGS.dev_kg_file and not FLAGS.test_kg_file:
raise ValueError("Evalution without a dev or test file!")
iterator = eval_data.dataset.make_initializable_iterator()
candidate_scores, candidates, labels, model, is_train_ph, inputs = \
create_model(train_graph, iterator)
# Create eval metrics
# if FLAGS.dev_kg_file:
batch_rr = metrics.mrr(candidate_scores, candidates, labels)
mrr, mrr_update = tf.metrics.mean(batch_rr)
mrr_summary = tf.summary.scalar("MRR", mrr)
all_hits, all_hits_update, all_hits_summaries = [], [], []
for k in [1, 3, 10]:
batch_hits = metrics.hits_at_k(candidate_scores, candidates, labels, k=k)
hits, hits_update = tf.metrics.mean(batch_hits)
hits_summary = tf.summary.scalar("Hits_at_%d" % k, hits)
all_hits.append(hits)
all_hits_update.append(hits_update)
all_hits_summaries.append(hits_summary)
hits = tf.group(*all_hits)
hits_update = tf.group(*all_hits_update)
global_step = tf.Variable(0, name="global_step", trainable=False)
current_step = tf.Variable(0, name="current_step", trainable=False,
collections=[tf.GraphKeys.LOCAL_VARIABLES])
incr_current_step = tf.assign_add(current_step, 1)
reset_current_step = tf.assign(current_step, 0)
slim.get_or_create_global_step(graph=tf.get_default_graph())
# best_hits = tf.Variable(0., trainable=False)
# best_step = tf.Variable(0, trainable=False)
# with tf.control_dependencies([hits]):
# update_best_hits = tf.cond(tf.greater(hits, best_hits),
# lambda: tf.assign(best_hits, hits),
# lambda: 0.)
# update_best_step = tf.cond(tf.greater(hits, best_hits),
# lambda: tf.assign(best_step, global_step),
# lambda: 0)
# best_hits_summary = tf.summary.scalar("Best Hits@10", best_hits)
# best_step_summary = tf.summary.scalar("Best Step", best_step)
nexamples = eval_data.data_graph.tuple_store.shape[0]
if eval_data.data_graph.add_reverse_graph:
nexamples *= 2
num_batches = math.ceil(nexamples / float(FLAGS.test_batchsize))
local_init_op = tf.local_variables_initializer()
if FLAGS.analyze:
entity_names = utils.read_entity_name_mapping(FLAGS.entity_names_file)
session = tf.Session()
# summary_writer = tf.summary.FileWriter(FLAGS.output_dir, session.graph)
init_op = tf.global_variables_initializer()
session.run(init_op)
session.run(local_init_op)
saver = tf.train.Saver(tf.trainable_variables())
ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
attention_probs = model["attention_encoder"].get_from_collection(
"attention_probs"
)
if FLAGS.clueweb_data:
s, nbrs_s, text_nbrs_s, text_nbrs_s_emb, r, candidates, _ = inputs
elif FLAGS.text_kg_file:
s, nbrs_s, text_nbrs_s, r, candidates, _ = inputs
else:
s, nbrs_s, r, candidates, _ = inputs
saver.restore(session, ckpt_path)
session.run(iterator.initializer)
num_attention = 5
nsteps = 0
outf_correct = open(FLAGS.output_dir + "/analyze_correct.txt", "w+")
outf_incorrect = open(
FLAGS.output_dir + "/analyze_incorrect.txt", "w+"
)
ncorrect = 0
analyze_outputs = [candidate_scores, s, nbrs_s, r, candidates, labels,
attention_probs]
if FLAGS.text_kg_file:
analyze_outputs.append(text_nbrs_s)
while True:
try:
analyze_vals = session.run(analyze_outputs, {is_train_ph: False})
if FLAGS.text_kg_file:
cscores, se, nbrs, qr, cands, te, nbr_attention_probs, text_nbrs = \
analyze_vals
else:
cscores, se, nbrs, qr, cands, te, nbr_attention_probs = analyze_vals
# import pdb; pdb.set_trace()
pred_ids = cscores.argmax(1)
for i in range(se.shape[0]):
sname = train_graph.inverse_entity_vocab[se[i]]
if sname in entity_names:
sname = entity_names[sname]
rname = train_graph.inverse_relation_vocab[qr[i]]
pred_target = cands[i, pred_ids[i]]
pred_name = train_graph.inverse_entity_vocab[pred_target]
if pred_name in entity_names:
pred_name = entity_names[pred_name]
tname = train_graph.inverse_entity_vocab[te[i][0]]
if tname in entity_names:
tname = entity_names[tname]
if te[i][0] == pred_target:
outf = outf_correct
ncorrect += 1
else:
outf = outf_incorrect
outf.write("\n(%d) %s, %s, ? \t Pred: %s \t Target: %s" %
(nsteps+i+1, sname, rname, pred_name, tname))
top_nbrs_index = np.argsort(nbr_attention_probs[i, :])[::-1]
outf.write("\nTop Nbrs:")
for j in range(num_attention):
nbr_index = top_nbrs_index[j]
if nbr_index < FLAGS.max_neighbors:
nbr_id = nbrs[i, nbr_index, :]
nbr_name = ""
for k in range(0, nbrs.shape[-1], 2):
ent_name = train_graph.inverse_entity_vocab[nbr_id[k+1]]
if ent_name in entity_names:
ent_name = entity_names[ent_name]
rel_name = train_graph.inverse_relation_vocab[nbr_id[k]]
nbr_name += "(%s, %s)" % (rel_name, ent_name)
else:
# Text Relation
text_nbr_ids = text_nbrs[i, nbr_index - FLAGS.max_neighbors, :]
text_nbr_ent = text_nbr_ids[0]
ent_name = train_graph.inverse_entity_vocab[text_nbr_ent]
if ent_name in entity_names:
ent_name = entity_names[ent_name]
rel_name = train_graph.get_relation_text(text_nbr_ids[1:])
nbr_name = "(%s, %s)" % (rel_name, ent_name)
outf.write("\n\t\t %s Prob: %.4f" %
(nbr_name, nbr_attention_probs[i, nbr_index]))
nsteps += se.shape[0]
tf.logging.info("Current hits@1: %.3f", ncorrect * 1.0 / (nsteps))
except tf.errors.OutOfRangeError:
break
outf_correct.close()
outf_incorrect.close()
return
class DataInitHook(tf.train.SessionRunHook):
def after_create_session(self, sess, coord):
sess.run(iterator.initializer)
sess.run(reset_current_step)
if FLAGS.test_only:
ckpt_path = FLAGS.model_path + "/model.ckpt-%d" % FLAGS.global_step
slim.evaluation.evaluate_once(
master=FLAGS.master,
checkpoint_path=ckpt_path,
logdir=FLAGS.output_dir,
variables_to_restore=tf.trainable_variables() + [global_step],
initial_op=tf.group(local_init_op, iterator.initializer),
# initial_op=iterator.initializer,
num_evals=num_batches,
eval_op=tf.group(mrr_update, hits_update, incr_current_step),
eval_op_feed_dict={is_train_ph: False},
final_op=tf.group(mrr, hits),
final_op_feed_dict={is_train_ph: False},
          summary_op=tf.summary.merge([mrr_summary] + all_hits_summaries),
hooks=[DataInitHook(),
tf.train.LoggingTensorHook(
{"mrr": mrr, "hits": hits, "step": current_step},
every_n_iter=1
)]
)
else:
slim.evaluation.evaluation_loop(
master=FLAGS.master,
checkpoint_dir=FLAGS.model_path,
logdir=FLAGS.output_dir,
variables_to_restore=tf.trainable_variables() + [global_step],
initial_op=tf.group(local_init_op, iterator.initializer),
# initial_op=iterator.initializer,
num_evals=num_batches,
eval_op=tf.group(mrr_update, hits_update, incr_current_step),
eval_op_feed_dict={is_train_ph: False},
final_op=tf.group(mrr, hits),
final_op_feed_dict={is_train_ph: False},
summary_op=tf.summary.merge([mrr_summary] + all_hits_summaries),
max_number_of_evaluations=None,
eval_interval_secs=60,
hooks=[DataInitHook(),
tf.train.LoggingTensorHook(
{"mrr": mrr, "hits": hits, "step": current_step},
every_n_iter=1
)]
)
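# Worked example of the batch arithmetic used earlier in this evaluation setup (the numbers are
# illustrative, not taken from the code): with 5000 eval triples and add_reverse_graph doubling
# them to 10000, a test_batchsize of 64 gives num_batches = ceil(10000 / 64.) = 157, so the final
# partial batch is still evaluated.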
def train():
"""Running the main training loop with given parameters."""
if FLAGS.task == 0 and not tf.gfile.Exists(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Read train/dev/test graphs, create datasets and model
add_inverse_edge = FLAGS.model in \
["source_rel_attention", "source_path_attention"]
train_graph, train_data = read_graph_data(
kg_file=FLAGS.kg_file,
add_reverse_graph=not add_inverse_edge,
add_inverse_edge=add_inverse_edge,
mode="train",
num_epochs=FLAGS.num_epochs, batchsize=FLAGS.batchsize,
max_neighbors=FLAGS.max_neighbors,
max_negatives=FLAGS.max_negatives,
text_kg_file=FLAGS.text_kg_file
)
worker_device = "/job:{}".format(FLAGS.brain_job_name)
with tf.device(
tf.train.replica_device_setter(
FLAGS.ps_tasks, worker_device=worker_device)):
iterator = train_data.dataset.make_one_shot_iterator()
candidate_scores, _, labels, model, is_train_ph, _ = create_model(
train_graph, iterator
)
# Create train loss and training op
loss = losses.softmax_crossentropy(logits=candidate_scores, labels=labels)
optimizer = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate)
global_step = tf.Variable(0, name="global_step", trainable=False)
train_op = get_train_op(loss, optimizer, FLAGS.grad_clip,
global_step=global_step)
tf.summary.scalar("Loss", loss)
run_options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
session_config = tf.ConfigProto(log_device_placement=True)
# Create tf training session
scaffold = tf.train.Scaffold(saver=tf.train.Saver(max_to_keep=1000))
# ckpt_hook = tf.train.CheckpointSaverHook(
# checkpoint_dir=FLAGS.output_dir, scaffold=scaffold,
# save_steps=FLAGS.save_every
# )
# summary_hook = tf.train.SummarySaverHook(
# save_secs=60, output_dir=FLAGS.output_dir,
# summary_op=tf.summary.merge_all()
# )
session = tf.train.MonitoredTrainingSession(
master=FLAGS.master,
is_chief=(FLAGS.task == 0),
checkpoint_dir=FLAGS.output_dir,
save_checkpoint_steps=FLAGS.save_every,
scaffold=scaffold,
save_summaries_secs=60,
# hooks=[summary_hook],
# chief_only_hooks=[ckpt_hook],
config=session_config
)
# Create embeddings visualization
if FLAGS.task == 0:
utils.save_embedding_vocabs(FLAGS.output_dir, train_graph,
FLAGS.entity_names_file)
pconfig = projector.ProjectorConfig()
add_embedding_to_projector(
pconfig, model["entity_encoder"].embeddings.name.split(":")[0],
os.path.join(FLAGS.output_dir, "entity_vocab.tsv")
)
add_embedding_to_projector(
pconfig, model["relation_encoder"].embeddings.name.split(":")[0],
os.path.join(FLAGS.output_dir, "relation_vocab.tsv")
)
if FLAGS.text_kg_file:
word_embeddings = model["text_encoder"].word_embedding_encoder.embeddings
add_embedding_to_projector(
pconfig, word_embeddings.name.split(":")[0],
os.path.join(FLAGS.output_dir, "word_vocab.tsv")
)
projector.visualize_embeddings(
SummaryWriterCache.get(FLAGS.output_dir), pconfig
)
# Main training loop
running_total_loss = 0.
nsteps = 0
gc.collect()
while True:
try:
current_loss, _, _ = session.run(
[loss, train_op, global_step],
# feed_dict={is_train_ph: True, handle: train_iterator_handle},
feed_dict={is_train_ph: True},
options=run_options
)
nsteps += 1
running_total_loss += current_loss
tf.logging.info("Step %d, loss: %.3f, running avg loss: %.3f",
nsteps, current_loss, running_total_loss / nsteps)
if nsteps %2 == 0:
gc.collect()
except tf.errors.OutOfRangeError:
tf.logging.info("End of Traning Epochs after %d steps", nsteps)
break
def main(argv):
del argv
if FLAGS.test_only or FLAGS.evaluate or FLAGS.analyze:
evaluate()
else:
train()
if __name__ == "__main__":
app.run(main)
|
|
#!/usr/bin/python
import argparse
import os
import sys # exit
import shutil # copyfile
import math # pi
import subprocess # Popen
rapterRoot = "/home/bontius/workspace/RAPter/";
rapterExec = os.path.join( rapterRoot, "RAPter", "build", "Release", "bin", "rapter" );
def show( primitivesPath, associationsPath, title, args ):
cmd = os.path.join("..","rapterVis --show%s --scale %f --pop-limit %d -p %s -a %s --cloud %s --title %s --angle-gens %s --use-tags --no-clusters --statuses -1,1 --no-pop --dir-colours --no-rel --no-scale --bg-colour 1.,1.,1. --no-rel" \
% ( args.flag3D, args.scale, args.popLimit, primitivesPath, associationsPath, args.cloud, title, args.angleGensStr ) );
print cmd
#print os.spawnlp( os.P_NOWAIT, "..", cmd )
subprocess.Popen( cmd, shell=True );
def call( cmd, dry = True, noExit = False ):
print("%s" % (cmd))
if dry:
print "DRY"
else:
print "RUN"
if not dry:
ret = os.system(cmd) >> 8 # python thing
if ret != 0:
if not noExit:
print("call returned error ", ret, ", aborting")
sys.exit()
return ret
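# Worked example of the exit-status arithmetic above (Unix convention, not RAPter-specific):
# os.system() returns the raw wait status, which carries the child's exit code in the high byte
# on a normal exit, so a status of 256 >> 8 == 1 recovers exit code 1, and 0 stays 0.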
def runRepr( rprPrims, rprAssoc, rprIter, args, angleGens, keepSingles ):
print( "Running with prims: %s, assoc: %s, iteration: %d" % (rprPrims,rprAssoc,rprIter) );
rprRepr = "representatives_it%d.csv" % rprIter # representatives output, contains one patch for each dId
rprReprAssoc = "points_representatives_it%d.csv" % rprIter # representatives output, contains associations for representative primitives only
rprCands = "candidates_representatives_it%d.csv" % rprIter # candidates generated from representatives
rprReprOpt = "representatives_it%d.bonmin.csv" % rprIter # new representatives chosen from candidates
rprPrimBak = "%s.lvl1.csv" % os.path.splitext(rprPrims)[0] # "`cutExt $rprPrims`".lvl1.csv
rprNextId = rprIter + 1; #`expr $c + 1`; #candidates will output here automatically...so we need to know
rprPw = args.pw
rprAngLimit = args.angleLimit
# representatives
cmd = "%s --represent%s -p %s -a %s -sc %f --cloud %s --angle-gens %s" \
% (args.rapterExec, args.flag3D, rprPrims, rprAssoc, args.scale, args.cloud, angleGens );
#my_exec "$executable --represent$flag3D -p $rprPrims -a $rprAssoc -sc $scale --cloud cloud.ply --angle-gens $anglegens"
call( cmd, args.dry );
if not args.dry:
#echo "mv representatives.csv $rprRepr"
#mv representatives.csv $rprRepr
shutil.move("representatives.csv",rprRepr);
#echo "mv points_representatives.csv $rprReprAssoc"
#mv points_representatives.csv $rprReprAssoc
shutil.move("points_representatives.csv",rprReprAssoc);
# ShowRepr
#cmd="../globOptVis --show$flag3D--scale $scale --pop-limit $poplimit --title \"Representatives\" --angle-gens $angleGens --use-tags --no-clusters --statuses -1,1 --no-pop --dir-colours --no-scale --bg-colour .9,.9,.9 --ids --no-rel -p $repr -a $assoc $"
#my_exec "../globOptVis --show$flag3D --scale $scale --pop-limit $poplimit -p $rprRepr -a $rprReprAssoc --title \"Representatives\" $visdefparam &"
# Generate from Repr
if not args.dry:
#echo "mv candidates_it${rprNextId}.csv candidates_it${rprNextId}_tmp.csv" # move tmp out of the way
#mv candidates_it${rprNextId}.csv candidates_it${rprNextId}_tmp.csv # move tmp out of the way
if os.path.isfile( "candidates_it%d.csv" % rprNextId ):
shutil.move( "candidates_it%d.csv" % rprNextId, "candidates_it%d_tmp.csv" % rprNextId );
#cmd = "$executable --generate$flag3D $tripletSafe -sc $scale -al $rprAngLimit -ald ${cand_anglediv} --small-mode 0 --patch-pop-limit $poplimit --angle-gens $candAngleGens --small-thresh-mult $smallThresh -p $rprRepr --assoc $rprReprAssoc --keep-singles"
#my_exec "%s --generate%s -sc $scale -al $rprAngLimit -ald 1.0 --patch-pop-limit $poplimit -p $rprRepr --assoc $rprReprAssoc --angle-gens $candAngleGens --small-thresh-mult %f --small-mode 0 %s %s"
cmd = "%s --generate%s -sc %f --cloud %s -al %f -ald 1.0 --patch-pop-limit %d -p %s --assoc %s --angle-gens %s --small-thresh-mult %f --small-mode 0 %s %s" \
% ( args.rapterExec, args.flag3D, args.scale, args.cloud, rprAngLimit, args.popLimit, rprRepr, rprReprAssoc, candAngleGens, \
args.smallThreshMult, args.tripletSafe, keepSingles );
call( cmd, args.dry );
if not args.dry:
# echo "mv candidates_it${rprNextId}.csv $rprCands"
# mv candidates_it${rprNextId}.csv $rprCands
shutil.move( "candidates_it%d.csv" % rprNextId, rprCands );
# echo "mv candidates_it${rprNextId}_tmp.csv candidates_it${rprNextId}.csv"
# mv candidates_it${rprNextId}_tmp.csv candidates_it${rprNextId}.csv
if os.path.isfile( "candidates_it%d_tmp.csv" % rprNextId ):
shutil.move( "candidates_it%d_tmp.csv" % rprNextId, "candidates_it%d.csv" % rprNextId ); # move back tmp
# Show candidates
#my_exec "../globOptVis --show$flag3D --scale $scale --pop-limit $poplimit -p $rprCands -a $rprReprAssoc --title \"GlobOpt-repr_candidates\" $visdefparam &"
# Formulate
# my_exec "$executable --formulate$flag3D $formParams --scale $scale --cloud cloud.ply --unary $unary --pw $rprPw --cmp $cmp --constr-mode patch --dir-bias $dirbias --patch-pop-limit $poplimit --angle-gens $anglegens --candidates $rprCands -a $rprReprAssoc --freq-weight $freqweight --cost-fn $pwCostFunc"
cmd = "%s --formulate%s --scale %f --unary %f --pw %f --spat-weight %f --spat-dist-mult 2. --patch-pop-limit %d --angle-gens %s --cloud %s --candidates %s -a %s --collapse-angle-deg %f --trunc-angle %f --constr-mode patch --dir-bias 0 --no-clusters --cmp 0 --freq-weight 0 --cost-fn spatsqrt" \
% ( args.rapterExec, args.flag3D, args.scale, args.data, rprPw, args.spatial, args.popLimit, angleGens, args.cloud, rprCands, rprReprAssoc, collapseThreshDeg, args.angleLimit)
call( cmd, args.dry );
rprDiagF = "diag_it%d.gv" % rprIter;
rprDiagFTmp = "%s%s" % (rprDiagF,"RprTmp");
if not args.dry:
# echo "cp primitives_it${rprIter}.bonmin.csv primitives_it${rprIter}_rprtmp.csv"
# cp primitives_it${rprIter}.bonmin.csv primitives_it${rprIter}_rprtmp.csv
shutil.copyfile( "primitives_it%d.bonmin.csv" % rprIter, "primitives_it%d_rprtmp.csv" % rprIter );
if os.path.isfile( rprDiagF ): # backup diag_itx.gv
#echo "mv $rprDiagF $rprDiagFTmp";
# mv $rprDiagF "$rprDiagFTmp"
shutil.move( rprDiagF, rprDiagFTmp );
# my_exec "$executable --solver$flag3D bonmin --problem problem -v --time -1 --angle-gens $anglegens --bmode $algCode --candidates $rprCands"
cmd = "%s --solver%s bonmin --problem problem -v --time -1 --angle-gens %s --bmode %d --candidates %s" \
% (args.rapterExec, args.flag3D, angleGens, args.algCode, rprCands )
call (cmd, args.dry );
if not args.dry:
# echo "cp primitives_it${rprIter}.bonmin.csv $rprReprOpt"
# cp primitives_it${rprIter}.bonmin.csv $rprReprOpt
shutil.copyfile( "primitives_it%d.bonmin.csv" % rprIter, rprReprOpt );
# echo "cp primitives_it${rprIter}_rprtmp.csv primitives_it${rprIter}.bonmin.csv"
# cp primitives_it${rprIter}_rprtmp.csv primitives_it${rprIter}.bonmin.csv
shutil.copyfile( "primitives_it%d_rprtmp.csv" % rprIter, "primitives_it%d.bonmin.csv" % rprIter );
# echo "mv $rprDiagF diag_it${rprIter}.lvl2.gv"
# mv $rprDiagF diag_it${rprIter}.lvl2.gv
shutil.move( rprDiagF, "diag_it%d.lvl2.gv" % rprIter );
# restore diag_itx.gv
if os.path.isfile( rprDiagFTmp ):
# echo "mv $rprDiagFTmp $rprDiagF"
# mv "$rprDiagFTmp" $rprDiagF
shutil.move( rprDiagFTmp, rprDiagF );
# rm "$rprDiagFTmp";
#os.remove( rprDiagFTmp );
#my_exec "../globOptVis --show$flag3D -p $rprReprOpt -a $rprReprAssoc --title \"GlobOpt-RepresentativesOptimized\" --scale $scale --pop-limit $poplimit $visdefparam &"
# apply representatives - outputs subs.csv
#my_exec "$executable --representBack$flag3D --repr $rprReprOpt -p $rprPrims -a $rprAssoc -sc $scale --cloud cloud.ply --angle-gens $anglegens"
cmd = "%s --representBack%s --repr %s -p %s -a %s -sc %f --cloud %s --angle-gens %s" \
% (args.rapterExec, args.flag3D, rprReprOpt, rprPrims, rprAssoc, args.scale, args.cloud, angleGens );
call( cmd, args.dry );
if not args.dry:
# echo "mv $rprPrims $rprPrimBak"
# mv $rprPrims $rprPrimBak
shutil.move( rprPrims, rprPrimBak );
# echo "mv subs.csv $rprPrims" #substitue for input
# mv subs.csv $rprPrims
shutil.move( "subs.csv", rprPrims );
parser = argparse.ArgumentParser()
suggestedGroup = parser.add_argument_group('suggested');
suggestedGroup.add_argument( "-s" , "--scale" , dest="scale" , type=float, default=0.05, help="Scale (rho) parameter, the smallest feature size to preserve [0.001..0.05]", required=True)
suggestedGroup.add_argument( "--al", "--angle-limit", dest="angleLimit" , type=float, default=15 , help="Angle threshlod (tau) parameter in degrees [5..45]")
suggestedGroup.add_argument( "--pw", "--pairwise" , dest="pw" , type=float, default=1.0 , help="Weight of pairwise term [0.1..10^6]" )
suggestedGroup.add_argument( "-t" , "--area-thresh-start" , dest="smallThreshMult", type=float, default= 4., help="Start with planes, that are scale * smallThreshMult large. Increase this, if optimisation too slow. [powers of 2].")
optionalGroup = parser.add_argument_group('optional');
optionalGroup.add_argument( "--ag", "--angle-gens" , dest="angleGens" , type=float, default=[0,90], help="Weight of pairwise term [0.1..10^6]", action="append" )
optionalGroup.add_argument( "--it", "--iterations" , dest="nbExtraIterations", type=int, default=15, help="How many iterations to run [5..20]")
optionalGroup.add_argument( "--cl", "--cloud" , dest="cloud" , type=str , default = "cloud.ply", help="Pointcloud in ply format [cloud.ply]");
optionalGroup.add_argument( "-l" , "--lines" , dest="lines" , action="store_true" , help="Work in 2D with lines instead of planes." )
runOptGroup = parser.add_argument_group('run options');
runOptGroup.add_argument( "--dry", action="store_true" , help="Show the calls, but don't run." )
runOptGroup.add_argument( "--no-vis", dest="noVis", action="store_false", default = False, help="Disable visualization (enabled by default)" )
optionalGroup.add_argument( "--pl", "--popLimit" , dest="popLimit" , type=int , default=5 , help="Filters primitives having less than this many points assigned [3..100]")
optionalGroup.add_argument( "--sp", "--spatial" , dest="spatial" , type=float, help="Weight of spatial term [0.1, pw/10., pw/5., pw/2.]" )
optionalGroup.add_argument("--vl" , "--var-limit" , dest="variableLimit", type=int , default=1000, help="Maximum number of variables (primitives) for the optimisation. [500..3000]")
optionalGroup.add_argument( "-d" , "--data" , dest="data" , type=float, default=1e5 , help="Weight of data term [10^5, 10^6]" )
optionalGroup.add_argument( "-p" , "--primitives" , dest="primitives" , type=str , help="Input primitives, e.g. existing segmentation segments.csv" )
optionalGroup.add_argument( "-a" , "--assoc" , dest="associations" , type=str , help="Input point-primitive associations, e.g. existing segmentation's points_segments.csv" )
optionalGroup.add_argument("--segment-scale-mult" , dest="segmentScaleMultiplier", type=float, default=1.0, help="Multiply scale by this value for the segmentation step. [0.5, 1.0, 2.0]")
optionalGroup.add_argument("--ald", "--angle-limit-divisor", dest="angleLimitDivisor" , type=float, default=1.0, help="Divide angle threshold (tau) by this number for candidate generation. [2.0, 1.0, 0.5]")
optionalGroup.add_argument("--alg-code" , dest="algCode" , type=int , default=0 , help="Bonmin algorithm enum codes. 0: B_BB, 1: OA, 2: QG, 3: Hyb, 4: ECP, 5: IFP. [0]");
args = parser.parse_args()
if not os.path.isfile(args.cloud):
print("Need \"%s\" to exist, assuming it's the pointcloud" % args.cloud );
sys.exit(1);
# if not args.scale:
# print("Need scale -s, --scale")
# exit
# if not args.angleLimit:
# print("Need angleLimit! Set using '-al' or '--angle-limit'!")
# exit
# convert to radians
args.angleLimit = args.angleLimit / 180.0 * math.pi
args.angleGensStr = ",".join( str(e) for e in args.angleGens )
print( "--popLimit %d \twill keep all primitives, that have more than this number of assigned points" % (args.popLimit) );
if not args.spatial:
args.spatial = args.pw / 10.
print( "--spatial %.3f" % (args.spatial) );
setattr( args, "rapterExec", rapterExec );
if not args.lines:
setattr( args, "flag3D" ,"3D" );
setattr( args, "tripletSafe","--triplet-safe" ); # Values: ["", "--triplet-safe"]
useAllGens = min(5, args.nbExtraIterations-1 ); # start with parallel generation only
else:
setattr( args, "flag3D" ,"" );
setattr( args, "tripletSafe","" );
useAllGens = 0
########################################################################################################################
# Do segmentation
if not args.primitives or not args.associations:
cmd = "%s --segment%s --scale %f --angle-limit %f --angle-gens %s --patch-pop-limit %d --dist-limit-mult %f --cloud %s" \
% ( rapterExec, args.flag3D, args.scale, args.angleLimit, args.angleGensStr, args.popLimit, args.segmentScaleMultiplier, args.cloud )
call( cmd, args.dry );
# # save output
if ( os.path.isfile("patches.csv") and os.path.isfile("points_primitives.csv") ):
if os.path.isfile("segments.csv"):
shutil.copyfile( "segments.csv", "segments.csv.bak" );
shutil.copyfile( "patches.csv", "segments.csv" )
if os.path.isfile("points_segments.csv"):
shutil.copyfile( "points_segments.csv", "points_segments.csv.bak" );
shutil.copyfile( "points_primitives.csv", "points_segments.csv" )
if not args.noVis:
show( "segments.csv", "points_segments.csv", "\"RAPter - Segmentation\"", args );
########################################################################################################################
########################################################################################################################
angleGens = "0"
candAngleGens = "0" # used to mirror anglegens, but keep const "0" for generate
primitives = "patches.csv"
associations = "points_primitives.csv"
keepSingles = "--keep-singles"
allowPromoted = "--allow-promoted"
smallThreshDiv = 2. # area threshold stepsize
smallThreshLimit = 0. # when to stop decreasing area threshold
promRem = 0 # remaining primitives to promote
collapseThreshDeg = 0.4 # initialize optimisation with the closest two orientations merged, if their difference is < collapseThreshDeg degrees.
adopt = 0
adoptChanged = False
decreaseLevel = False
iteration = 0
while iteration <= args.nbExtraIterations:
    # decrease, unless there is more to do on the same level
if decreaseLevel:
args.smallThreshMult = float(int(args.smallThreshMult / smallThreshDiv));
# if we reached the bottom working scale (ideally 0)
if args.smallThreshMult <= smallThreshLimit:
args.smallThreshMult = int(smallThreshLimit) # make sure it's integer
if decreaseLevel: # if we don't have to promote any more patches on this level
adopt = "1" # if we promoted all patches, we can allow points to get re-assigned
if not adoptChanged:
adoptChanged = True # only enter here once
useAllGens = iteration + 2 # if we promoted all patches in the scene, do a 90 round
args.nbExtraIterations = max(args.nbExtraIterations,useAllGens + 3) # do k more rounds after the 90 round
# reset to false, meaning we will continue decreasing, unless generate flips it again
decreaseLevel = True
print( "smallThreshMult: %d" % args.smallThreshMult );
print( "__________________________________________________________" );
print( "Start iteration %d" % iteration );
prevId = iteration - 1;
nextId = iteration + 1;
if iteration > 0:
primitives = "primitives_merged_it%d.csv" % prevId;
associations = "points_primitives_it%d.csv" % prevId;
# (2) Generate - generate candidates
cmd = "%s --generate%s -sc %f -al %f -ald %f --patch-pop-limit %d -p %s --assoc %s --cloud %s --angle-gens %s --small-thresh-mult %f --var-limit %d --small-mode 0 %s %s %s" \
% (rapterExec, args.flag3D, args.scale, args.angleLimit, args.angleLimitDivisor, args.popLimit, primitives, associations, args.cloud, candAngleGens, \
args.smallThreshMult, args.variableLimit, \
args.tripletSafe, keepSingles, allowPromoted );
promRem = call( cmd, args.dry, True );
print "[rapter.py] Remaining smalls to promote: ", promRem
    # Don't decrease the area threshold until there are no more candidates left to promote
if promRem != 0:
decreaseLevel = False; # set to true each iteration
# (3) Formulate - create optimisation problem
cmd = "%s --formulate%s --scale %f --unary %f --pw %f --spat-weight %f --spat-dist-mult 2. --patch-pop-limit %d --angle-gens %s --cloud %s --candidates candidates_it%d.csv -a %s --collapse-angle-deg %f --trunc-angle %f --constr-mode patch --dir-bias 0 --no-clusters --cmp 0 --freq-weight 0 --cost-fn spatsqrt" \
% ( rapterExec, args.flag3D, args.scale, args.data, args.pw, args.spatial, args.popLimit, angleGens, args.cloud, iteration, associations, collapseThreshDeg, args.angleLimit)
call( cmd, args.dry );
# (4) Solve
cmd = "%s --solver%s bonmin --problem problem -v --time -1 --bmode %d --angle-gens %s --candidates candidates_it%d.csv --cloud %s" \
% ( rapterExec, args.flag3D, args.algCode, angleGens, iteration, args.cloud );
call( cmd, args.dry )
if not args.noVis:
show( "primitives_it%d.bonmin.csv" % iteration, associations, "\"RAPter - Iteration%d\"" % iteration, args );
if iteration == useAllGens:
angleGens = ','.join( str(e) for e in args.angleGens );
candAngleGens = angleGens;
# TODO
runRepr( "primitives_it%d.bonmin.csv" % iteration, associations, iteration, args, angleGens, keepSingles );
# (6) CoPlanarity
cmd = "%s --merge%s --scale %f --adopt %s --prims primitives_it%d.bonmin.csv -a %s --angle-gens %s --patch-pop-limit %d --cloud %s" \
% ( rapterExec, args.flag3D, args.scale, adopt, iteration, associations, angleGens, args.popLimit, args.cloud );
call( cmd, args.dry )
# Don't copy promoted patches' directions to other patches after 4 iterations (c==3), since they are not reliable anymore
if iteration == 3:
allowPromoted = ""
# Don't throw away single directions before the 3rd (c==1) iteration.
# This will keep large patches, even if they don't copy to anywhere for later.
if iteration == 1:
keepSingles = ""
# If we are still promoting small patches on this working scale, make sure to run more iterations
if iteration == args.nbExtraIterations and promRem != 0:
args.nbExtraIterations += 1;
# Increment iteration counter (while loop)
iteration += 1;
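# Illustrative schedule under the defaults above: with --area-thresh-start 4 and smallThreshDiv = 2,
# the working area threshold steps 4 -> 2 -> 1 -> 0 on iterations where nothing is left to promote;
# once it reaches 0, adopt switches to "1" and the full angle generators (e.g. 0,90) are enabled a
# couple of iterations later, with a few extra iterations appended after that.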
|
|
from __future__ import print_function, division, absolute_import
import traceback
import inspect
import sys
import weakref  # used by WeakType._store_object below
from .abstract import *
from .common import *
from .misc import unliteral
from numba.ir import Loc
from numba import errors
# terminal color markup
_termcolor = errors.termcolor()
class _ResolutionFailures(object):
"""Collect and format function resolution failures.
"""
def __init__(self, context, function_type, args, kwargs):
self._context = context
self._function_type = function_type
self._args = args
self._kwargs = kwargs
self._failures = []
def __len__(self):
return len(self._failures)
def add_error(self, calltemplate, error):
"""
Args
----
calltemplate : CallTemplate
error : Exception or str
Error message
"""
self._failures.append((calltemplate, error))
def format(self):
"""Return a formatted error message from all the gathered errors.
"""
indent = ' ' * 4
args = [str(a) for a in self._args]
args += ["%s=%s" % (k, v) for k, v in sorted(self._kwargs.items())]
headtmp = 'Invalid use of {} with argument(s) of type(s): ({})'
msgbuf = [headtmp.format(self._function_type, ', '.join(args))]
explain = self._context.explain_function_type(self._function_type)
msgbuf.append(explain)
for i, (temp, error) in enumerate(self._failures):
msgbuf.append(_termcolor.errmsg("In definition {}:".format(i)))
msgbuf.append(_termcolor.highlight('{}{}'.format(
indent, self.format_error(error))))
loc = self.get_loc(temp, error)
if loc:
msgbuf.append('{}raised from {}'.format(indent, loc))
likely_cause = ("This error is usually caused by passing an argument "
"of a type that is unsupported by the named function.")
msgbuf.append(_termcolor.errmsg(likely_cause))
return '\n'.join(msgbuf)
def format_error(self, error):
"""Format error message or exception
"""
if isinstance(error, Exception):
return '{}: {}'.format(type(error).__name__, error)
else:
return '{}'.format(error)
def get_loc(self, classtemplate, error):
"""Get source location information from the error message.
"""
if isinstance(error, Exception) and hasattr(error, '__traceback__'):
# traceback is unavailable in py2
frame = traceback.extract_tb(error.__traceback__)[-1]
return "{}:{}".format(frame[0], frame[1])
class BaseFunction(Callable):
"""
Base type class for some function types.
"""
def __init__(self, template):
if isinstance(template, (list, tuple)):
self.templates = tuple(template)
keys = set(temp.key for temp in self.templates)
if len(keys) != 1:
raise ValueError("incompatible templates: keys = %s"
                                 % (keys,))
self.typing_key, = keys
else:
self.templates = (template,)
self.typing_key = template.key
self._impl_keys = {}
name = "%s(%s)" % (self.__class__.__name__, self.typing_key)
super(BaseFunction, self).__init__(name)
@property
def key(self):
return self.typing_key, self.templates
def augment(self, other):
"""
Augment this function type with the other function types' templates,
so as to support more input types.
"""
if type(other) is type(self) and other.typing_key == self.typing_key:
return type(self)(self.templates + other.templates)
def get_impl_key(self, sig):
"""
Get the implementation key (used by the target context) for the
given signature.
"""
return self._impl_keys[sig.args]
def get_call_type(self, context, args, kws):
failures = _ResolutionFailures(context, self, args, kws)
for temp_cls in self.templates:
temp = temp_cls(context)
for uselit in [True, False]:
try:
if uselit:
sig = temp.apply(args, kws)
else:
nolitargs = tuple([unliteral(a) for a in args])
nolitkws = {k: unliteral(v) for k, v in kws.items()}
sig = temp.apply(nolitargs, nolitkws)
except Exception as e:
sig = None
failures.add_error(temp_cls, e)
else:
if sig is not None:
self._impl_keys[sig.args] = temp.get_impl_key(sig)
return sig
else:
haslit= '' if uselit else 'out'
msg = "All templates rejected with%s literals." % haslit
failures.add_error(temp_cls, msg)
if len(failures) == 0:
raise AssertionError("Internal Error. "
"Function resolution ended with no failures "
"or successfull signature")
raise errors.TypingError(failures.format())
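    # Illustration of the retry above (types shown are indicative): for a call like f(3) the
    # template is first applied with the literal argument type (e.g. Literal[int](3)); if that
    # is rejected, the arguments are passed through unliteral() and retried as plain integers,
    # and only if both attempts fail is the failure recorded for the error message.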
def get_call_signatures(self):
sigs = []
is_param = False
for temp in self.templates:
sigs += getattr(temp, 'cases', [])
is_param = is_param or hasattr(temp, 'generic')
return sigs, is_param
class Function(BaseFunction, Opaque):
"""
Type class for builtin functions implemented by Numba.
"""
class BoundFunction(Callable, Opaque):
"""
A function with an implicit first argument (denoted as *this* below).
"""
def __init__(self, template, this):
# Create a derived template with an attribute *this*
newcls = type(template.__name__ + '.' + str(this), (template,),
dict(this=this))
self.template = newcls
self.typing_key = self.template.key
self.this = this
name = "%s(%s for %s)" % (self.__class__.__name__,
self.typing_key, self.this)
super(BoundFunction, self).__init__(name)
def unify(self, typingctx, other):
if (isinstance(other, BoundFunction) and
self.typing_key == other.typing_key):
this = typingctx.unify_pairs(self.this, other.this)
if this is not None:
# XXX is it right that both template instances are distinct?
return self.copy(this=this)
def copy(self, this):
return type(self)(self.template, this)
@property
def key(self):
return self.typing_key, self.this
def get_impl_key(self, sig):
"""
Get the implementation key (used by the target context) for the
given signature.
"""
return self.typing_key
def get_call_type(self, context, args, kws):
template = self.template(context)
e = None
# Try with Literal
try:
out = template.apply(args, kws)
        except Exception as exc:
            out = None
            e = exc  # keep a reference; Python 3 clears the except-clause name on exit
# If that doesn't work, remove literals
if out is None:
args = [unliteral(a) for a in args]
kws = {k: unliteral(v) for k, v in kws.items()}
out = template.apply(args, kws)
if out is None and e is not None:
raise e
return out
def get_call_signatures(self):
sigs = getattr(self.template, 'cases', [])
is_param = hasattr(self.template, 'generic')
return sigs, is_param
class MakeFunctionLiteral(Literal, Opaque):
pass
class WeakType(Type):
"""
Base class for types parametered by a mortal object, to which only
a weak reference is kept.
"""
def _store_object(self, obj):
self._wr = weakref.ref(obj)
def _get_object(self):
obj = self._wr()
if obj is None:
raise ReferenceError("underlying object has vanished")
return obj
@property
def key(self):
return self._wr
def __eq__(self, other):
if type(self) is type(other):
obj = self._wr()
return obj is not None and obj is other._wr()
def __hash__(self):
return Type.__hash__(self)
class Dispatcher(WeakType, Callable, Dummy):
"""
Type class for @jit-compiled functions.
"""
def __init__(self, dispatcher):
self._store_object(dispatcher)
super(Dispatcher, self).__init__("type(%s)" % dispatcher)
def get_call_type(self, context, args, kws):
"""
Resolve a call to this dispatcher using the given argument types.
        A signature is returned and it is ensured that a compiled specialization
is available for it.
"""
template, pysig, args, kws = self.dispatcher.get_call_template(args, kws)
sig = template(context).apply(args, kws)
if sig:
sig.pysig = pysig
return sig
def get_call_signatures(self):
sigs = self.dispatcher.nopython_signatures
return sigs, True
@property
def dispatcher(self):
"""
A strong reference to the underlying numba.dispatcher.Dispatcher instance.
"""
return self._get_object()
def get_overload(self, sig):
"""
Get the compiled overload for the given signature.
"""
return self.dispatcher.get_overload(sig.args)
def get_impl_key(self, sig):
"""
Get the implementation key for the given signature.
"""
return self.get_overload(sig)
class ObjModeDispatcher(Dispatcher):
"""Dispatcher subclass that enters objectmode function.
"""
pass
class ExternalFunctionPointer(BaseFunction):
"""
A pointer to a native function (e.g. exported via ctypes or cffi).
*get_pointer* is a Python function taking an object
and returning the raw pointer value as an int.
"""
def __init__(self, sig, get_pointer, cconv=None):
from ..typing.templates import (AbstractTemplate, make_concrete_template,
signature)
from . import ffi_forced_object
if sig.return_type == ffi_forced_object:
raise TypeError("Cannot return a pyobject from a external function")
self.sig = sig
self.requires_gil = any(a == ffi_forced_object for a in self.sig.args)
self.get_pointer = get_pointer
self.cconv = cconv
if self.requires_gil:
class GilRequiringDefn(AbstractTemplate):
key = self.sig
def generic(self, args, kws):
if kws:
raise TypeError("does not support keyword arguments")
                    # Make ffi_forced_object a bottom type to allow any type to be
                    # cast to it. This is the only place that supports
                    # ffi_forced_object.
coerced = [actual if formal == ffi_forced_object else formal
for actual, formal
in zip(args, self.key.args)]
return signature(self.key.return_type, *coerced)
template = GilRequiringDefn
else:
template = make_concrete_template("CFuncPtr", sig, [sig])
super(ExternalFunctionPointer, self).__init__(template)
@property
def key(self):
return self.sig, self.cconv, self.get_pointer
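# Hedged usage sketch (ctypes-based, not taken from this module): an external pointer type could
# be built roughly as
#   import ctypes
#   sig = float64(float64)                     # a numba signature object
#   cfn = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double)(lambda x: x * 2.0)
#   fp = ExternalFunctionPointer(sig, get_pointer=lambda f: ctypes.cast(f, ctypes.c_void_p).value)
# where get_pointer returns the raw address as an int, as the docstring above requires.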
class ExternalFunction(Function):
"""
A named native function (resolvable by LLVM) accepting an explicit signature.
For internal use only.
"""
def __init__(self, symbol, sig):
from .. import typing
self.symbol = symbol
self.sig = sig
template = typing.make_concrete_template(symbol, symbol, [sig])
super(ExternalFunction, self).__init__(template)
@property
def key(self):
return self.symbol, self.sig
class NamedTupleClass(Callable, Opaque):
"""
Type class for namedtuple classes.
"""
def __init__(self, instance_class):
self.instance_class = instance_class
name = "class(%s)" % (instance_class)
super(NamedTupleClass, self).__init__(name)
def get_call_type(self, context, args, kws):
        # Overridden by the __call__ constructor resolution in typing.collections
return None
def get_call_signatures(self):
return (), True
@property
def key(self):
return self.instance_class
class NumberClass(Callable, DTypeSpec, Opaque):
"""
Type class for number classes (e.g. "np.float64").
"""
def __init__(self, instance_type):
self.instance_type = instance_type
name = "class(%s)" % (instance_type,)
super(NumberClass, self).__init__(name)
def get_call_type(self, context, args, kws):
        # Overridden by the __call__ constructor resolution in typing.builtins
return None
def get_call_signatures(self):
return (), True
@property
def key(self):
return self.instance_type
@property
def dtype(self):
return self.instance_type
class RecursiveCall(Opaque):
"""
Recursive call to a Dispatcher.
"""
_overloads = None
def __init__(self, dispatcher_type):
assert isinstance(dispatcher_type, Dispatcher)
self.dispatcher_type = dispatcher_type
name = "recursive(%s)" % (dispatcher_type,)
super(RecursiveCall, self).__init__(name)
# Initializing for the first time
if self._overloads is None:
self._overloads = {}
@property
def overloads(self):
return self._overloads
@property
def key(self):
return self.dispatcher_type
|
|
"""
Peter Norvig's lis.py hacked with lower-level data representations.
Plus a few other tweaks like support for the 'quote' read macro.
To do: add a garbage collector and clean up the added code.
This will be more work than I thought because the recursive eval
needs to make its local variables known to the garbage collector --
silly of me to have forgotten that. (The issue didn't come up in
awklisp because it was already using a stack.)
"""
from __future__ import division
def tag_of(x):
return x[0]
def untag(tag, x):
assert x[0] == tag
return x[1]
nil = 'nil', None
false = 'bool', False
true = 'bool', True
def Bool(flag): return true if flag else false
def tester(op): return 'prim', lambda *args: Bool(op(*args))
def arith(op):
return 'prim', lambda *args: ('num',
op(*[untag('num', arg) for arg in args]))
def unnum(op):
return lambda *args: op(*[untag('num', arg) for arg in args])
symbols = {}
def Symbol(name):
if name not in symbols:
symbols[name] = 'symbol', name
return symbols[name]
heap_size = 1*1000
cars = [None] * heap_size
cdrs = [None] * heap_size
heap_ptr = 0
def cons(a, d):
global heap_ptr
if heap_size <= heap_ptr:
collect_garbage(a, d)
cars[heap_ptr] = a
cdrs[heap_ptr] = d
heap_ptr += 1
return 'pair', heap_ptr - 1
def collect_garbage(a, d):
assert False # XXX
def car(x): return cars[untag('pair', x)]
def cdr(x): return cdrs[untag('pair', x)]
def explode(x):
"Make a Python list from a Lisp list."
result = []
while x is not nil:
result.append(car(x))
x = cdr(x)
return result
def implode(x):
"Make a Lisp list from a Python list."
result = nil
for v in reversed(x):
result = cons(v, result)
return result
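# Quick illustration of the two list helpers above, using the tagging scheme defined earlier:
#   lst = implode([('num', 1), ('num', 2)])   # a ('pair', ...) chain ending in nil
#   explode(lst)                              # => [('num', 1), ('num', 2)]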
################ Lispy: Scheme Interpreter in Python
### (c) Peter Norvig, 2010; See http://norvig.com/lispy.html
################ Symbol, Env classes
class Env(dict):
"An environment: a dict of {'var':val} pairs, with an outer Env."
def __init__(self, parms=(), args=(), outer=None):
self.update(zip(parms,args))
self.outer = outer
def find(self, var):
"Find the innermost Env where var appears."
if var in self: return self
if not self.outer: assert False, var
return self.outer.find(var)
def add_globals(env):
"Add some Scheme standard procedures to an environment."
import math, operator as op
env.update((Symbol(name), arith(f)) # sin, sqrt, ...
for name, f in vars(math).items()
if type(f) == type(op.add))
env.update(
{Symbol('+'): arith(op.add),
Symbol('-'): arith(op.sub),
Symbol('*'): arith(op.mul),
Symbol('/'): arith(op.div),
Symbol('not'): tester(lambda x: x is false),
Symbol('>'): tester(unnum(op.gt)),
Symbol('<'): tester(unnum(op.lt)),
Symbol('>='): tester(unnum(op.ge)),
Symbol('<='): tester(unnum(op.le)),
Symbol('='): tester(unnum(op.eq)),
# Symbol('equal?'): op.eq,
Symbol('eq?'): tester(op.is_),
# Symbol('length'): len,
Symbol('cons'): ('prim', cons),
Symbol('car'): ('prim', car),
Symbol('cdr'): ('prim', cdr),
# Symbol('append'): op.add,
# Symbol('list'): lambda *x: reduce(cons, ...
Symbol('pair?'): tester(lambda x: tag_of(x) == 'pair'),
Symbol('null?'): tester(lambda x: x is nil),
Symbol('symbol?'): tester(lambda x: tag_of(x) == 'symbol')})
return env
global_env = add_globals(Env())
def isa(x, tag): return tag_of(x) == tag
################ eval
quote_, if_, set_, define_, lambda_, begin_ = (
map(Symbol, 'quote if set! define lambda begin'.split()))
def eval(x, env=global_env):
"Evaluate an expression in an environment."
if isa(x, 'symbol'): # variable reference
return env.find(x)[x]
elif not isa(x, 'pair'): # constant literal
return x
elif car(x) is quote_: # (quote exp)
(_, exp) = explode(x)
return exp
elif car(x) is if_: # (if test conseq alt)
(_, test, conseq, alt) = explode(x)
return eval((alt if eval(test, env) is false else conseq), env)
elif car(x) is set_: # (set! var exp)
(_, var, exp) = explode(x)
env.find(var)[var] = eval(exp, env)
return false
elif car(x) is define_: # (define var exp)
(_, var, exp) = explode(x)
env[var] = eval(exp, env)
return false
elif car(x) is lambda_: # (lambda (var*) exp)
(_, vars, exp) = explode(x)
return 'fun', untag('pair', implode([exp, vars, env]))
elif car(x) is begin_: # (begin exp*)
val = false
for exp in explode(x)[1:]:
val = eval(exp, env)
return val
else: # (proc exp*)
args = [eval(exp, env) for exp in explode(x)]
proc = args.pop(0)
if isa(proc, 'prim'):
return untag('prim', proc)(*args)
if isa(proc, 'fun'):
(exp, vars, env) = explode(('pair', untag('fun', proc)))
return eval(exp, Env(explode(vars), args, env))
raise ValueError("Call to non-procedure")
################ parse, read, and user interaction
def read(s):
"Read a Scheme expression from a string."
return read_from(tokenize(s))
parse = read
def tokenize(s):
"Convert a string into a list of tokens."
return s.replace('(',' ( ').replace(')',' ) ').replace("'", " ' ").split()
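# For example, tokenize("(+ 1 'x)") yields ['(', '+', '1', "'", 'x', ')'],
# with the quote read macro split out as its own token.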
def read_from(tokens):
"Read an expression from a sequence of tokens."
if len(tokens) == 0:
raise SyntaxError('unexpected EOF while reading')
token = tokens.pop(0)
if '(' == token:
L = []
while tokens[0] != ')':
L.append(read_from(tokens))
tokens.pop(0) # pop off ')'
return implode(L)
elif ')' == token:
raise SyntaxError('unexpected )')
elif "'" == token:
return implode([quote_, read_from(tokens)])
else:
return atom(token)
def atom(token):
"Numbers become numbers; every other token is a symbol."
try: return 'num', int(token)
except ValueError:
try: return 'num', float(token)
except ValueError:
return Symbol(token)
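# e.g. atom("42") == ('num', 42), atom("2.5") == ('num', 2.5), and atom("foo") returns the
# interned ('symbol', 'foo') produced by Symbol.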
def to_string(x):
"Convert a Python object back into a Lisp-readable string."
if x is nil: return '()'
if isa(x, 'bool'): return '#t' if untag('bool', x) else '#f'
if isa(x, 'num'): return str(untag('num', x))
if isa(x, 'symbol'): return str(untag('symbol', x))
if isa(x, 'pair'): return '('+' '.join(map(to_string, explode(x)))+')'
if isa(x, 'fun'): return '#<fun>' # XXX more
if isa(x, 'prim'): return '#<'+str(untag('prim', x))+'>'
assert False
def repl(prompt='lis.py> '):
"A prompt-read-eval-print loop."
while True:
val = eval(parse(raw_input(prompt)))
if val is not None: print to_string(val)
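# Minimal end-to-end sketch (run manually; Python 2 as above):
#   >>> to_string(eval(parse("(begin (define r 10) (* 3 r))")))
#   '30'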
|
|
#=== SOUND ===========================================================================================
# Convenience classes and functions for audio manipulation.
# Authors: Frederik De Bleser, Lieven Menschaert, Tom De Smedt.
# License: BSD (see LICENSE.txt for details).
# Copyright (c) 2008-2012 City In A Bottle (cityinabottle.org)
# http://cityinabottle.org/nodebox
import osc
import sys
import os
import subprocess
import StringIO
import signal
import time
import socket as _socket
def _find(match=lambda item: False, list=[]):
""" Returns the first item in the list for which match(item)=True, or None.
"""
for item in list:
if match(item): return item
#=====================================================================================================
#--- PROCESS -----------------------------------------------------------------------------------------
# True when running on Windows.
WINDOWS = sys.platform.startswith('win')
if not WINDOWS:
import select
import fcntl
def read_non_blocking(stream, bytes=1024, timeout=0):
# Reads a number of bytes from the given stream,
# without deadlocking when no more data is available (returns None instead).
fcntl.fcntl(stream, fcntl.F_SETFL, fcntl.fcntl(stream, fcntl.F_GETFL) | os.O_NONBLOCK)
if not select.select([stream], [], [], 0)[0]:
return None
return stream.read(bytes)
if WINDOWS:
import ctypes; from ctypes.wintypes import DWORD
import msvcrt
def read_non_blocking(stream, bytes=1024):
# Reads a number of bytes from the given stream,
# without deadlocking when no more data is available (returns None instead).
p = msvcrt.get_osfhandle(stream.fileno())
s = ctypes.create_string_buffer(1)
b = ctypes.windll.kernel32.PeekNamedPipe(p, s, 1, None, None, None)
if s.value:
c_read = DWORD()
s = ctypes.create_string_buffer(bytes+1)
b = ctypes.windll.kernel32.ReadFile(p, s, bytes+1, ctypes.byref(c_read), None)
s[c_read.value] = '\0'
return s.value.decode()
class Process(object):
def __init__(self, program, options={}, start=True):
""" Runs the given program (i.e. executable file path) as a background process
with the given command-line options.
"""
self.program = program
self.options = options
self._process = None
if start:
self.start()
@property
def started(self):
return self._process is not None
@property
def id(self):
return self._process \
and self._process.pid or None
pid = id
@property
def output(self, bytes=1024):
# Yields a number of bytes of output, or None if the process is idle.
if self._process is not None:
return read_non_blocking(self._process.stdout, bytes)
def start(self):
""" Starts the program with the given command-line options.
The output can be read from Process.output.
"""
o = [self.program]; [o.extend((k,v)) for k,v in self.options.items()]
o = [str(x) for x in o if x is not None]
self._process = subprocess.Popen(o,
stdout = subprocess.PIPE,
stderr = subprocess.STDOUT
)
def stop(self):
""" Attempts to stop the process.
Returns True when the process is stopped, False otherwise.
"""
if not self._process:
# The process has not been started.
return True
if hasattr(self._process, 'kill'):
# Popen.kill() works in Python 2.6+ on all platforms.
self._process.kill()
self._process = None
return True
if self._process.pid is not None and not WINDOWS:
# os.kill() works in Python 2.4+ on Unix and Mac OS X.
os.kill(self._process.pid, signal.SIGTERM)
time.sleep(0.1)
self._process = None
return True
if self._process.pid is not None and WINDOWS:
# Use ctypes on Windows platforms.
import ctypes
p = ctypes.windll.kernel32.OpenProcess(1, False, self._process.pid)
ctypes.windll.kernel32.TerminateProcess(p, -1)
ctypes.windll.kernel32.CloseHandle(p)
time.sleep(0.1)
self._process = None
return True
return False
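# Hedged example (the program path and flags are assumptions, not part of this module):
#   p = Process("/sbin/ping", options={"-c": 3, "127.0.0.1": None})
#   chunk = p.output   # a chunk of buffered stdout, or None while the process is quiet
#   p.stop()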
#=====================================================================================================
#--- SOCKET ------------------------------------------------------------------------------------------
class Socket(_socket.socket):
def __init__(self, host, port):
""" Creates a socket connection to the given host (IP address) and port.
Socket.close() will close the connection when Socket.connections is 0
"""
_socket.socket.__init__(self, _socket.AF_INET, _socket.SOCK_DGRAM)
self.bind((host, port))
self.setblocking(0)
self.connections = 0
def close(self):
if self.connections <= 0:
_socket.socket.close(self)
_sockets = {}
def socket(host, port):
""" Returns the socket connection to the given host and port, creating it when none exists.
"""
return _sockets.setdefault("%s:%s" % (host, port), Socket(host, port))
#--- PUREDATA ----------------------------------------------------------------------------------------
# Pd application default paths:
# /usr/local/bin/pd
# /Applications/Pd-extended.app/Contents/Resources/bin/pd
# C:\Program Files\pd\bin\pd.exe
PD_UNIX1 = "pdextended"
PD_UNIX2 = "pd"
PD_MACOSX = "Pd-extended.app/Contents/Resources/bin/pd"
PD_WINDOWS = "pd\\bin\\pd.exe"
DEFAULT = "default"
# Default server.
LOCALHOST = "127.0.0.1"
# Default ports.
# PD.get() receives on port 44000, the Pd patch broadcasts on port 44000.
# PD.send() broadcasts on port 44001, the Pd patch receives on port 44001.
IN = 44000
OUT = 44001
class PDError(Exception):
pass
class PD(object):
def __init__(self, patch=None, buffer=128, options={}, start=False, path=DEFAULT):
""" Creates a network connection with PureData.
When a patch (.pd file) is given and start=True, loads PD with the patch in the background.
Otherwise, communication can be established with whatever patch is active in a running PD.
The PD.send() method sends data to the patch running at a given host and port.
The path defines the location of the PD executable.
A number of default locations are searched as well:
- the current folder,
- /usr/bin/pdextended (Unix, preferred),
- /usr/local/bin/pd (Unix),
- /Applications/Pd-extended.app/Contents/Resources/bin/pd (Mac OS X),
- C:\Program Files\pd\bin\pd.exe (Windows).
Command-line options can be given as a dictionary, e.g.
PD(options={'-alsa': None})
"""
path = path != DEFAULT and path or ""
path = _find(lambda x: os.path.exists(x), [
path,
os.path.join(path, PD_UNIX1),
os.path.join(path, PD_UNIX2),
os.path.join(path, PD_MACOSX),
os.path.join(path, PD_WINDOWS),
"usr/bin/" + PD_UNIX1,
"usr/local/bin/" + PD_UNIX1,
"usr/bin/" + PD_UNIX2,
"usr/local/bin/" + PD_UNIX2,
"/Applications/" + PD_MACOSX,
"C:\\Program Files\\" + PD_WINDOWS
])
self._path = path # PD executable location.
self._process = None # PD executable running in background.
self._callback = {} # [PDCallback, data] items indexed by path + host + port.
self._options = dict(options) # For PD-Extended 0.41- on Mac OS X, only works with -nogui.
self._options.setdefault("-nogui", None)
self._options.setdefault("-audiobuf", buffer)
self._options.setdefault("-open", patch)
if start:
self.start()
osc.init()
@property
def patch(self):
return self._options.get("-open")
@property
def buffer(self):
return self._options.get("-audiobuf")
def start(self):
""" Starts PD as a background process and loads PD.patch.
If PD is already running another patch, restarts the application.
"""
if self.patch is None \
or not os.path.exists(self.patch):
raise PDError, "no PD patch file at '%s'" % self.patch
if not self._path:
raise PDError, "no PD application found"
if not os.path.exists(self._path):
raise PDError, "no PD application at '%s'" % self._path
if not self._process:
self._process = Process(program=self._path, options=self._options)
def stop(self):
for callback in self._callback.values():
callback.stop()
return self._process \
and self._process.stop()
def send(self, data, path, host=LOCALHOST, port=OUT):
""" Sends the given list of data over OSC to PD.
The path specifies the address where PD receives the data e.g. "/creature/perch".
"""
osc.sendMsg(path, data, host, port)
def get(self, path, host=LOCALHOST, port=IN):
""" Returns the data sent from the given path in PD.
"""
id = "%s%s%s" % (path, host, port)
if not id in self._callback:
self._callback[id] = PDCallback(path, host, port)
return self._callback[id].data
def __del__(self):
try: self.stop()
except:
pass
@property
def output(self):
return self._process.output
class PDCallback:
def __init__(self, path, host=LOCALHOST, port=44001):
""" Creates a listener for data broadcast from Pd.
PDCallback.__call__() is called from PD.get().
"""
osc.bind(self, path)
self._path = path
self._data = []
self._socket = socket(host, port)
self._socket.connections += 1
def __call__(self, *data):
# First two arguments in the list are the path and typetags string.
self._data = data[0][2:] if data != "nodata" else []
@property
def data(self):
osc.getOSC(self._socket)
return self._data
def stop(self):
self._socket.connections -= 1
self._socket.close()
self._socket = None
|
|
"""\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urlparse, urllib, types
import handler
import xmlreader
try:
_StringTypes = [types.StringType, types.UnicodeType]
except AttributeError:
_StringTypes = [types.StringType]
# See whether the xmlcharrefreplace error handler is
# supported
try:
from codecs import xmlcharrefreplace_errors
_error_handling = "xmlcharrefreplace"
del xmlcharrefreplace_errors
except ImportError:
_error_handling = "strict"
def __dict_replace(s, d):
"""Replace substrings of a string using a dictionary."""
for key, value in d.items():
s = s.replace(key, value)
return s
def escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
# must do ampersand first
data = data.replace("&", "&")
data = data.replace(">", ">")
data = data.replace("<", "<")
if entities:
data = __dict_replace(data, entities)
return data
def unescape(data, entities={}):
"""Unescape &, <, and > in a string of data.
You can unescape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
data = data.replace("<", "<")
data = data.replace(">", ">")
if entities:
data = __dict_replace(data, entities)
# must do ampersand last
return data.replace("&", "&")
def quoteattr(data, entities={}):
"""Escape and quote an attribute value.
Escape &, <, and > in a string of data, then quote it for use as
an attribute value. The \" character will be escaped as well, if
necessary.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
entities = entities.copy()
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t':'&#9;'})
data = escape(data, entities)
if '"' in data:
if "'" in data:
data = '"%s"' % data.replace('"', """)
else:
data = "'%s'" % data
else:
data = '"%s"' % data
return data
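# A few illustrative values, given the entity handling above:
#   escape("<a & b>")       => '&lt;a &amp; b&gt;'
#   quoteattr('say "hi"')   => '\'say "hi"\''   (single-quoted because the value contains ")
#   quoteattr("it's")       => '"it\'s"'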
class XMLGenerator(handler.ContentHandler):
def __init__(self, out=None, encoding="iso-8859-1"):
if out is None:
import sys
out = sys.stdout
handler.ContentHandler.__init__(self)
self._out = out
self._ns_contexts = [{}] # contains uri -> prefix dicts
self._current_context = self._ns_contexts[-1]
self._undeclared_ns_maps = []
self._encoding = encoding
def _write(self, text):
if isinstance(text, str):
self._out.write(text)
else:
self._out.write(text.encode(self._encoding, _error_handling))
def _qname(self, name):
"""Builds a qualified name from a (ns_url, localname) pair"""
if name[0]:
# Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
# bound by definition to http://www.w3.org/XML/1998/namespace. It
# does not need to be declared and will not usually be found in
# self._current_context.
if 'http://www.w3.org/XML/1998/namespace' == name[0]:
return 'xml:' + name[1]
# The name is in a non-empty namespace
prefix = self._current_context[name[0]]
if prefix:
# If it is not the default namespace, prepend the prefix
return prefix + ":" + name[1]
# Return the unqualified name
return name[1]
# ContentHandler methods
def startDocument(self):
self._write('<?xml version="1.0" encoding="%s"?>\n' %
self._encoding)
def startPrefixMapping(self, prefix, uri):
self._ns_contexts.append(self._current_context.copy())
self._current_context[uri] = prefix
self._undeclared_ns_maps.append((prefix, uri))
def endPrefixMapping(self, prefix):
self._current_context = self._ns_contexts[-1]
del self._ns_contexts[-1]
def startElement(self, name, attrs):
self._write('<' + name)
for (name, value) in attrs.items():
self._write(' %s=%s' % (name, quoteattr(value)))
self._write('>')
def endElement(self, name):
self._write('</%s>' % name)
def startElementNS(self, name, qname, attrs):
self._write('<' + self._qname(name))
for prefix, uri in self._undeclared_ns_maps:
if prefix:
self._out.write(' xmlns:%s="%s"' % (prefix, uri))
else:
self._out.write(' xmlns="%s"' % uri)
self._undeclared_ns_maps = []
for (name, value) in attrs.items():
self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
self._write('>')
def endElementNS(self, name, qname):
self._write('</%s>' % self._qname(name))
def characters(self, content):
self._write(escape(content))
def ignorableWhitespace(self, content):
self._write(content)
def processingInstruction(self, target, data):
self._write('<?%s %s?>' % (target, data))
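# Hedged usage sketch for XMLGenerator above (Python 2, writing to an in-memory buffer):
#   from StringIO import StringIO
#   buf = StringIO()
#   gen = XMLGenerator(buf)
#   gen.startDocument()
#   gen.startElement("root", {"id": "1"})
#   gen.characters("salt & pepper")
#   gen.endElement("root")
#   buf.getvalue()
#   # => '<?xml version="1.0" encoding="iso-8859-1"?>\n<root id="1">salt &amp; pepper</root>'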
class XMLFilterBase(xmlreader.XMLReader):
"""This class is designed to sit between an XMLReader and the
client application's event handlers. By default, it does nothing
but pass requests up to the reader and events on to the handlers
unmodified, but subclasses can override specific methods to modify
the event stream or the configuration requests as they pass
through."""
def __init__(self, parent = None):
xmlreader.XMLReader.__init__(self)
self._parent = parent
# ErrorHandler methods
def error(self, exception):
self._err_handler.error(exception)
def fatalError(self, exception):
self._err_handler.fatalError(exception)
def warning(self, exception):
self._err_handler.warning(exception)
# ContentHandler methods
def setDocumentLocator(self, locator):
self._cont_handler.setDocumentLocator(locator)
def startDocument(self):
self._cont_handler.startDocument()
def endDocument(self):
self._cont_handler.endDocument()
def startPrefixMapping(self, prefix, uri):
self._cont_handler.startPrefixMapping(prefix, uri)
def endPrefixMapping(self, prefix):
self._cont_handler.endPrefixMapping(prefix)
def startElement(self, name, attrs):
self._cont_handler.startElement(name, attrs)
def endElement(self, name):
self._cont_handler.endElement(name)
def startElementNS(self, name, qname, attrs):
self._cont_handler.startElementNS(name, qname, attrs)
def endElementNS(self, name, qname):
self._cont_handler.endElementNS(name, qname)
def characters(self, content):
self._cont_handler.characters(content)
def ignorableWhitespace(self, chars):
self._cont_handler.ignorableWhitespace(chars)
def processingInstruction(self, target, data):
self._cont_handler.processingInstruction(target, data)
def skippedEntity(self, name):
self._cont_handler.skippedEntity(name)
# DTDHandler methods
def notationDecl(self, name, publicId, systemId):
self._dtd_handler.notationDecl(name, publicId, systemId)
def unparsedEntityDecl(self, name, publicId, systemId, ndata):
self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)
# EntityResolver methods
def resolveEntity(self, publicId, systemId):
return self._ent_handler.resolveEntity(publicId, systemId)
# XMLReader methods
def parse(self, source):
self._parent.setContentHandler(self)
self._parent.setErrorHandler(self)
self._parent.setEntityResolver(self)
self._parent.setDTDHandler(self)
self._parent.parse(source)
def setLocale(self, locale):
self._parent.setLocale(locale)
def getFeature(self, name):
return self._parent.getFeature(name)
def setFeature(self, name, state):
self._parent.setFeature(name, state)
def getProperty(self, name):
return self._parent.getProperty(name)
def setProperty(self, name, value):
self._parent.setProperty(name, value)
# XMLFilter methods
def getParent(self):
return self._parent
def setParent(self, parent):
self._parent = parent
# --- Utility functions
def prepare_input_source(source, base = ""):
"""This function takes an InputSource and an optional base URL and
returns a fully resolved InputSource object ready for reading."""
if type(source) in _StringTypes:
source = xmlreader.InputSource(source)
elif hasattr(source, "read"):
f = source
source = xmlreader.InputSource()
source.setByteStream(f)
if hasattr(f, "name"):
source.setSystemId(f.name)
if source.getByteStream() is None:
sysid = source.getSystemId()
basehead = os.path.dirname(os.path.normpath(base))
sysidfilename = os.path.join(basehead, sysid)
if os.path.isfile(sysidfilename):
source.setSystemId(sysidfilename)
f = open(sysidfilename, "rb")
else:
source.setSystemId(urlparse.urljoin(base, sysid))
f = urllib.urlopen(source.getSystemId())
source.setByteStream(f)
return source
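# A minimal usage sketch (not part of the module above): an XMLFilterBase
# subclass that upper-cases element names before passing events through to the
# downstream content handler. The class name is hypothetical.
class _UppercaseElementFilter(XMLFilterBase):
    def startElement(self, name, attrs):
        # rewrite the element name, then delegate to the normal pass-through
        XMLFilterBase.startElement(self, name.upper(), attrs)
    def endElement(self, name):
        XMLFilterBase.endElement(self, name.upper())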
|
|
"""Data channels for calls.
"""
__docformat__ = 'restructuredtext en'
import time
from copy import copy
from .utils import *
from .enums import *
from .errors import SkypeError
class CallChannelManager(EventHandlingBase):
"""Instantiate this class to create a call channel manager. A call channel manager will
automatically create a data channel (based on the APP2APP protocol) for voice calls.
Usage
=====
You should access this class using the alias at the package level:
.. python::
import Skype4Py
skype = Skype4Py.Skype()
ccm = Skype4Py.CallChannelManager()
ccm.Connect(skype)
Read the constructor (`CallChannelManager.__init__`) documentation for a list of
accepted arguments.
Events
======
This class provides events.
    The event names and their argument lists can be found in the
`CallChannelManagerEvents` class in this module.
The use of events is explained in `EventHandlingBase` class which
is a superclass of this class.
"""
def __del__(self):
if getattr(self, '_App', None):
self._App.Delete()
self._App = None
self._Skype.UnregisterEventHandler('ApplicationStreams', self._OnApplicationStreams)
self._Skype.UnregisterEventHandler('ApplicationReceiving', self._OnApplicationReceiving)
self._Skype.UnregisterEventHandler('ApplicationDatagram', self._OnApplicationDatagram)
def __init__(self, Events=None, Skype=None):
"""Initializes the object.
:Parameters:
Events
An optional object with event handlers. See `EventHandlingBase` for more
information on events.
"""
EventHandlingBase.__init__(self)
if Events:
self._SetEventHandlerObj(Events)
self._App = None
self._Name = 'CallChannelManager'
self._ChannelType = cctReliable
self._Channels = []
self.Connect(Skype)
def _ApplicationDatagram(self, App, Stream, Text):
if App == self._App:
            for ch in self._Channels:
if ch['stream'] == Stream:
msg = CallChannelMessage(Text)
self._CallEventHandler('Message', self, CallChannel(self, ch), msg)
break
def _ApplicationReceiving(self, App, Streams):
if App == self._App:
for ch in self._Channels:
if ch['stream'] in Streams:
                    msg = CallChannelMessage(ch['stream'].Read())
self._CallEventHandler('Message', self, CallChannel(self, ch), msg)
def _ApplicationStreams(self, App, Streams):
if App == self._App:
for ch in self._Channels:
if ch['stream'] not in Streams:
self._Channels.remove(ch)
self._CallEventHandler('Channels', self, self.Channels)
def _CallStatus(self, Call, Status):
if Status == clsRinging:
if self._App is None:
self.CreateApplication()
self._App.Connect(Call.PartnerHandle, True)
for stream in self._App.Streams:
if stream.PartnerHandle == Call.PartnerHandle:
self._Channels.append(dict(call=Call, stream=stream))
self._CallEventHandler('Channels', self, self.Channels)
break
elif Status in (clsCancelled, clsFailed, clsFinished, clsRefused, clsMissed):
for ch in self._Channels:
if ch['call'] == Call:
self._Channels.remove(ch)
self._CallEventHandler('Channels', self, self.Channels)
try:
ch['stream'].Disconnect()
except SkypeError:
pass
break
def Connect(self, Skype):
"""Connects this call channel manager instance to Skype. This is the first thing you should
do after creating this object.
:Parameters:
Skype : `Skype`
The Skype object.
:see: `Disconnect`
"""
self._Skype = Skype
self._Skype.RegisterEventHandler('CallStatus', self._CallStatus)
del self._Channels[:]
def CreateApplication(self, ApplicationName=None):
"""Creates an APP2APP application context. The application is automatically created using
`application.Application.Create` method.
:Parameters:
ApplicationName : unicode
Application name. Initial name, when the manager is created, is ``u'CallChannelManager'``.
"""
if ApplicationName is not None:
self.Name = tounicode(ApplicationName)
self._App = self._Skype.Application(self.Name)
self._Skype.RegisterEventHandler('ApplicationStreams', self._ApplicationStreams)
self._Skype.RegisterEventHandler('ApplicationReceiving', self._ApplicationReceiving)
self._Skype.RegisterEventHandler('ApplicationDatagram', self._ApplicationDatagram)
self._App.Create()
self._CallEventHandler('Created', self)
def Disconnect(self):
"""Disconnects from the Skype instance.
:see: `Connect`
"""
self._Skype.UnregisterEventHandler('CallStatus', self._CallStatus)
self._Skype = None
def _GetChannels(self):
return tuple(self._Channels)
Channels = property(_GetChannels,
doc="""All call data channels.
:type: tuple of `CallChannel`
""")
def _GetChannelType(self):
return self._ChannelType
def _SetChannelType(self, Value):
self._ChannelType = str(Value)
ChannelType = property(_GetChannelType, _SetChannelType,
doc="""Queries/sets the default channel type.
:type: `enums`.cct*
""")
def _GetCreated(self):
return (not not self._App)
Created = property(_GetCreated,
doc="""Returns True if the application context has been created.
:type: bool
""")
def _GetName(self):
return self._Name
def _SetName(self, Value):
self._Name = tounicode(Value)
Name = property(_GetName, _SetName,
doc="""Queries/sets the application context name.
:type: unicode
""")
class CallChannelManagerEvents(object):
"""Events defined in `CallChannelManager`.
See `EventHandlingBase` for more information on events.
"""
def Channels(self, Manager, Channels):
"""This event is triggered when list of call channels changes.
:Parameters:
Manager : `CallChannelManager`
The call channel manager object.
Channels : tuple of `CallChannel`
Updated list of call channels.
"""
def Created(self, Manager):
"""This event is triggered when the application context has successfully been created.
:Parameters:
Manager : `CallChannelManager`
The call channel manager object.
"""
def Message(self, Manager, Channel, Message):
"""This event is triggered when a call channel message has been received.
:Parameters:
Manager : `CallChannelManager`
The call channel manager object.
Channel : `CallChannel`
The call channel object receiving the message.
Message : `CallChannelMessage`
The received message.
"""
CallChannelManager._AddEvents(CallChannelManagerEvents)
class CallChannel(object):
"""Represents a call channel.
"""
def __repr__(self):
return Cached.__repr__(self, 'Manager', 'Call', 'Stream')
def SendTextMessage(self, Text):
"""Sends a text message over channel.
:Parameters:
Text : unicode
Text to send.
"""
if self.Type == cctReliable:
self.Stream.Write(Text)
elif self.Type == cctDatagram:
self.Stream.SendDatagram(Text)
else:
            raise SkypeError(0, 'Cannot send using %s channel type' % repr(self.Type))
def _GetCall(self):
return self._Handle['call']
Call = property(_GetCall,
doc="""The call object associated with this channel.
:type: `Call`
""")
def _GetManager(self):
return self._Owner
Manager = property(_GetManager,
doc="""The call channel manager object.
:type: `CallChannelManager`
""")
def _GetStream(self):
return self._Handle['stream']
Stream = property(_GetStream,
doc="""Underlying APP2APP stream object.
:type: `ApplicationStream`
""")
def _GetType(self):
return self._Handle.get('type', self.Manager.ChannelType)
def _SetType(self, Value):
self._Handle['type'] = str(Value)
Type = property(_GetType, _SetType,
doc="""Type of this channel.
:type: `enums`.cct*
""")
class CallChannelMessage(object):
"""Represents a call channel message.
"""
def __init__(self, Text):
"""Initializes the object.
:Parameters:
Text : unicode
The message text.
"""
self._Text = tounicode(Text)
def _GetText(self):
return self._Text
def _SetText(self, Value):
self._Text = tounicode(Value)
Text = property(_GetText, _SetText,
doc="""Queries/sets the message text.
:type: unicode
""")
|
|
import logging
import os
import threading
import time
from enum import Enum
from functools import partial
from pathlib import Path
from urllib.parse import urljoin
import arrow
import requests
import tqdm
from . import command, upload
from .info import URL, DEFAULT_REMOTE_DIR
from .info import RawFileInfo, SimpleFileInfo
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# Python 3.4 compatibility via scandir backport
if hasattr(os, "scandir"):
scandir = os.scandir
else:
import scandir
scandir = scandir.scandir
class Direction(str, Enum):
up = "upload" # upload direction
down = "download" # download direction
#####################################
# Synchronizing newly created files
class Monitor:
"""Synchronizes newly created files TO or FROM FlashAir
in separate threads"""
def __init__(self, *filters, local_dir=".",
remote_dir=DEFAULT_REMOTE_DIR):
self._filters = filters
self._local_dir = local_dir
self._remote_dir = remote_dir
self.running = threading.Event()
self.thread = None
def _run(self, method):
assert self.thread is None
self.running.set()
self.thread = threading.Thread(target=self._run_sync, args=(method,))
self.thread.start()
def _run_sync(self, method):
files = method(*self._filters, local_dir=self._local_dir,
remote_dir=self._remote_dir)
while self.running.is_set():
_, new = next(files)
if not new:
time.sleep(0.3)
def sync_both(self):
self._run(up_down_by_arrival)
def sync_up(self):
self._run(up_by_arrival)
def sync_down(self):
self._run(down_by_arrival)
def stop(self):
self.running.clear()
def join(self):
if self.thread:
self.thread.join()
self.thread = None
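# A minimal usage sketch (not part of the original module; requires a reachable
# FlashAir card): drive the Monitor class defined above for a few seconds. The
# helper name and the 5-second window are assumptions for illustration only.
def _example_monitor_usage():
    monitor = Monitor(local_dir=".", remote_dir=DEFAULT_REMOTE_DIR)
    monitor.sync_up()   # start uploading newly created local files in a thread
    try:
        time.sleep(5)   # let the background thread poll for a short while
    finally:
        monitor.stop()  # clear the running flag
        monitor.join()  # wait for the worker thread to exit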
def up_down_by_arrival(*filters, local_dir=".",
remote_dir=DEFAULT_REMOTE_DIR):
"""Monitors a local directory and a remote FlashAir directory and
generates sets of new files to be uploaded or downloaded.
    Sets of files to upload are generated in a tuple like (Direction.up, {...}),
    while sets of files to download are generated in a tuple like
    (Direction.down, {...}). The generator yields
    before each upload or download actually takes place."""
local_monitor = watch_local_files(*filters, local_dir=local_dir)
remote_monitor = watch_remote_files(*filters, remote_dir=remote_dir)
_, lfile_set = next(local_monitor)
_, rfile_set = next(remote_monitor)
_notify_sync_ready(len(lfile_set), local_dir, remote_dir)
_notify_sync_ready(len(rfile_set), remote_dir, local_dir)
processed = set()
for new_local, new_remote in zip(local_monitor, remote_monitor):
new_local, local_set = new_local
local_arrivals = {f for f in new_local if f.filename not in processed}
yield Direction.up, local_arrivals
if local_arrivals:
            processed.update(f.filename for f in local_arrivals)
_notify_sync(Direction.up, local_arrivals)
up_by_files(local_arrivals, remote_dir)
_notify_sync_ready(len(local_set), local_dir, remote_dir)
new_remote, remote_set = new_remote
remote_arrivals = {f for f in new_remote if f.filename not in processed}
yield Direction.down, remote_arrivals
if remote_arrivals:
            processed.update(f.filename for f in remote_arrivals)
            _notify_sync(Direction.down, remote_arrivals)
down_by_files(remote_arrivals, local_dir)
_notify_sync_ready(len(remote_set), remote_dir, local_dir)
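# A minimal consumption sketch (not part of the original module; requires a
# reachable FlashAir card): each iteration of the generator above yields a
# (Direction, set-of-files) pair before the corresponding transfer happens.
# The helper name is hypothetical.
def _example_consume_up_down():
    for direction, arrivals in up_down_by_arrival(local_dir=".",
                                                  remote_dir=DEFAULT_REMOTE_DIR):
        if arrivals:
            logger.info("%s: %d new file(s)", direction, len(arrivals))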
def up_by_arrival(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR):
"""Monitors a local directory and
generates sets of new files to be uploaded to FlashAir.
Sets to upload are generated in a tuple like (Direction.up, {...}).
The generator yields before each upload actually takes place."""
local_monitor = watch_local_files(*filters, local_dir=local_dir)
_, file_set = next(local_monitor)
_notify_sync_ready(len(file_set), local_dir, remote_dir)
for new_arrivals, file_set in local_monitor:
yield Direction.up, new_arrivals # where new_arrivals is possibly empty
if new_arrivals:
_notify_sync(Direction.up, new_arrivals)
up_by_files(new_arrivals, remote_dir)
_notify_sync_ready(len(file_set), local_dir, remote_dir)
def down_by_arrival(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR):
"""Monitors a remote FlashAir directory and generates sets of
new files to be downloaded from FlashAir.
Sets to download are generated in a tuple like (Direction.down, {...}).
The generator yields AFTER each download actually takes place."""
remote_monitor = watch_remote_files(*filters, remote_dir=remote_dir)
_, file_set = next(remote_monitor)
_notify_sync_ready(len(file_set), remote_dir, local_dir)
for new_arrivals, file_set in remote_monitor:
if new_arrivals:
_notify_sync(Direction.down, new_arrivals)
down_by_files(new_arrivals, local_dir)
_notify_sync_ready(len(file_set), remote_dir, local_dir)
yield Direction.down, new_arrivals
###################################################
# Sync ONCE in the DOWN (from FlashAir) direction
def down_by_all(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", **_):
files = command.list_files(*filters, remote_dir=remote_dir)
down_by_files(files, local_dir=local_dir)
def down_by_files(to_sync, local_dir="."):
"""Sync a given list of files from `command.list_files` to `local_dir` dir"""
for f in to_sync:
_sync_remote_file(local_dir, f)
def down_by_time(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
"""Sync most recent file by date, time attribues"""
files = command.list_files(*filters, remote_dir=remote_dir)
most_recent = sorted(files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]
_notify_sync(Direction.down, to_sync)
down_by_files(to_sync[::-1], local_dir=local_dir)
def down_by_name(*filters, remote_dir=DEFAULT_REMOTE_DIR, local_dir=".", count=1):
"""Sync files whose filename attribute is highest in alphanumeric order"""
files = command.list_files(*filters, remote_dir=remote_dir)
greatest = sorted(files, key=lambda f: f.filename)
to_sync = greatest[-count:]
_notify_sync(Direction.down, to_sync)
down_by_files(to_sync[::-1], local_dir=local_dir)
def _sync_remote_file(local_dir, remote_file_info):
local = Path(local_dir, remote_file_info.filename)
local_name = str(local)
remote_size = remote_file_info.size
if local.exists():
local_size = local.stat().st_size
if local.stat().st_size == remote_size:
logger.info(
"Skipping '{}': already exists locally".format(
local_name))
else:
logger.warning(
"Removing {}: local size {} != remote size {}".format(
local_name, local_size, remote_size))
os.remove(local_name)
_stream_to_file(local_name, remote_file_info)
else:
_stream_to_file(local_name, remote_file_info)
def _stream_to_file(local_name, fileinfo):
logger.info("Copying remote file {} to {}".format(
fileinfo.path, local_name))
streaming_file = _get_file(fileinfo)
_write_file_safely(local_name, fileinfo, streaming_file)
def _get_file(fileinfo):
url = urljoin(URL, fileinfo.path)
logger.info("Requesting file: {}".format(url))
return requests.get(url, stream=True)
def _write_file_safely(local_path, fileinfo, response):
"""attempts to stream a remote file into a local file object,
removes the local file if it's interrupted by any error"""
try:
_write_file(local_path, fileinfo, response)
except BaseException as e:
logger.warning("{} interrupted writing {} -- "
"cleaning up partial file".format(
e.__class__.__name__, local_path))
os.remove(local_path)
raise e
def _write_file(local_path, fileinfo, response):
start = time.time()
pbar_size = fileinfo.size / (5 * 10**5)
pbar = tqdm.tqdm(total=int(pbar_size))
if response.status_code == 200:
with open(local_path, "wb") as outfile:
for chunk in response.iter_content(5*10**5):
progress = len(chunk) / (5 * 10**5)
_update_pbar(pbar, progress)
outfile.write(chunk)
else:
raise requests.RequestException("Expected status code 200")
pbar.close()
duration = time.time() - start
logger.info("Wrote {} in {:0.2f} s ({:0.2f} MB, {:0.2f} MB/s)".format(
fileinfo.filename, duration, fileinfo.size / 10 ** 6,
fileinfo.size / (duration * 10 ** 6)))
def _update_pbar(pbar, val):
update_val = max(int(val), 1)
try:
pbar.update(update_val)
except Exception as e:
# oh, c'mon TQDM, progress bars shouldn't crash software
logger.debug("TQDM progress bar error: {}({})".format(
e.__class__.__name__, e))
###########################################
# Local and remote file watcher-generators
def watch_local_files(*filters, local_dir="."):
list_local = partial(list_local_files, *filters, local_dir=local_dir)
old_files = new_files = set(list_local())
while True:
yield new_files - old_files, new_files
old_files = new_files
new_files = set(list_local())
def watch_remote_files(*filters, remote_dir="."):
command.memory_changed() # clear change status to start
list_remote = partial(command.list_files,
*filters, remote_dir=remote_dir)
old_files = new_files = set(list_remote())
while True:
yield new_files - old_files, new_files
old_files = new_files
if command.memory_changed():
new_files = set(list_remote())
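# Hedged sketch (not part of the original module): the watcher generators above
# yield (newly_seen_files, all_files) tuples on every poll. The helper name is
# hypothetical.
def _example_poll_local():
    watcher = watch_local_files(local_dir=".")
    _, initial = next(watcher)             # first yield: nothing counts as new yet
    new_files, all_files = next(watcher)   # later yields report fresh arrivals
    logger.info("%d new of %d total local files", len(new_files), len(all_files))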
#####################################################
# Synchronize ONCE in the UP direction (to FlashAir)
def up_by_all(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, **_):
files = list_local_files(*filters, local_dir=local_dir)
up_by_files(list(files), remote_dir=remote_dir)
def up_by_files(to_sync, remote_dir=DEFAULT_REMOTE_DIR, remote_files=None):
"""Sync a given list of local files to `remote_dir` dir"""
if remote_files is None:
remote_files = command.map_files_raw(remote_dir=remote_dir)
for local_file in to_sync:
_sync_local_file(local_file, remote_dir, remote_files)
def up_by_time(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, count=1):
"""Sync most recent file by date, time attribues"""
remote_files = command.map_files_raw(remote_dir=remote_dir)
local_files = list_local_files(*filters, local_dir=local_dir)
most_recent = sorted(local_files, key=lambda f: f.datetime)
to_sync = most_recent[-count:]
_notify_sync(Direction.up, to_sync)
up_by_files(to_sync[::-1], remote_dir, remote_files)
def up_by_name(*filters, local_dir=".", remote_dir=DEFAULT_REMOTE_DIR, count=1):
"""Sync files whose filename attribute is highest in alphanumeric order"""
remote_files = command.map_files_raw(remote_dir=remote_dir)
local_files = list_local_files(*filters, local_dir=local_dir)
greatest = sorted(local_files, key=lambda f: f.filename)
to_sync = greatest[-count:]
_notify_sync(Direction.up, to_sync)
up_by_files(to_sync[::-1], remote_dir, remote_files)
def _sync_local_file(local_file_info, remote_dir, remote_files):
local_name = local_file_info.filename
local_size = local_file_info.size
if local_name in remote_files:
remote_file_info = remote_files[local_name]
remote_size = remote_file_info.size
if local_size == remote_size:
logger.info(
"Skipping '{}' already exists on SD card".format(
local_name))
else:
logger.warning(
"Removing remote file {}: "
"local size {} != remote size {}".format(
local_name, local_size, remote_size))
upload.delete_file(remote_file_info.path)
_stream_from_file(local_file_info, remote_dir)
else:
_stream_from_file(local_file_info, remote_dir)
def _stream_from_file(fileinfo, remote_dir):
logger.info("Uploading local file {} to {}".format(
fileinfo.path, remote_dir))
_upload_file_safely(fileinfo, remote_dir)
def _upload_file_safely(fileinfo, remote_dir):
"""attempts to upload a local file to FlashAir,
tries to remove the remote file if interrupted by any error"""
try:
upload.upload_file(fileinfo.path, remote_dir=remote_dir)
except BaseException as e:
logger.warning("{} interrupted writing {} -- "
"cleaning up partial remote file".format(
e.__class__.__name__, fileinfo.path))
upload.delete_file(fileinfo.path)
raise e
def list_local_files(*filters, local_dir="."):
all_entries = scandir(local_dir)
file_entries = (e for e in all_entries if e.is_file())
for entry in file_entries:
stat = entry.stat()
size = stat.st_size
datetime = arrow.get(stat.st_mtime)
path = str(Path(local_dir, entry.name))
info = SimpleFileInfo(local_dir, entry.name, path, size, datetime)
if all(filt(info) for filt in filters):
yield info
def list_local_files_raw(*filters, local_dir="."):
all_entries = scandir(local_dir)
all_files = (e for e in all_entries if e.is_file() and
all(filt(e) for filt in filters))
for entry in all_files:
path = str(Path(local_dir, entry.name))
yield RawFileInfo(local_dir, entry.name, path, entry.stat().st_size)
def _notify_sync(direction, files):
logger.info("{:d} files to {:s}:\n{}".format(
len(files), direction,
"\n".join(" " + f.filename for f in files)))
def _notify_sync_ready(num_old_files, from_dir, to_dir):
logger.info("Ready to sync new files from {} to {} "
"({:d} existing files ignored)".format(
from_dir, to_dir, num_old_files))
|
|
#Python3.5 Mac OS X
#2015.2.12
#[email protected]
'''
Implementation of the C4.5 decision tree algorithm.
It differs from ID3 in how the best splitting feature is chosen:
info_gain_ratio is used instead of info_gain,
which avoids favouring features that have many distinct values.
'''
from math import log
import operator
import numpy as np
#a small data set to verify the correctness of the program
def create_dataset():
dataset = [[1, 1, 'yes'],
[1, 1, 'yes'],
[1, 0, 'no'],
[0, 1, 'no'],
[0, 1, 'no']]
    feature_names = ['no surfacing','flippers']
    return dataset, feature_names
#
def pre_process(dataset):
pass
#transform the text file to matrix
def file_to_matrix(filename):
fr=open(filename)
num_of_lines=len(fr.readlines())
matrix=np.zeros((num_of_lines,3))
class_labels=[]
feature_names=[]
fr=open(filename)
feature_names=fr.readline().split('\t')[:-1]
index=0
for line in fr.readlines():
line=line.strip()
words_list = line.split('\t')
        matrix[index,:]=words_list[0:-1]  #all columns except the trailing class label
class_labels.append(words_list[-1])
index+=1
return matrix,class_labels,feature_names
#partition the dataset into a train set and a test set
def partition_dataset(dataset,ratio):
    l=len(dataset)
    len_test=int(l*ratio)
test_set=dataset[:len_test]
train_set=dataset[len_test:]
return train_set,test_set
#calculate entropy
def entropy(dataset):
length=len(dataset)
labels={}
for feature_vec in dataset:
label=feature_vec[-1]
if label not in labels.keys():
labels[label]=0
labels[label]+=1
entropy=0.0
for item in labels:
prob=labels[item]/length
entropy-=prob*log(prob,2)
return entropy
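#quick sanity check (hedged sketch, not part of the original file): the toy
#labels are ['yes','yes','no','no','no'], so the entropy should be about
#-(2/5)*log2(2/5) - (3/5)*log2(3/5) ~= 0.971
def _example_entropy():
    data, _ = create_dataset()
    print(entropy(data))  # expected ~0.971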
#return the subset of the dataset where the feature at index equals value,
#with that feature column removed
def split_dataset(dataset,index,value):
result=[]
for feature_vec in dataset:
if feature_vec[index]==value:
reduced_vec=feature_vec[:index]
reduced_vec.extend(feature_vec[index+1:])
result.append(reduced_vec)
return result
#discretize continuous values by choosing a proper center value;
#axis is the column that needs to be discretized
def discretize(dataset,feature_values,axis):
base_entropy=entropy(dataset)
best_info_gain_ratio=0.0
    best_discret_split=[]
    sorted_values=sorted(set(feature_values))
    for i in range(len(sorted_values)-1):
        #reset the accumulators for each candidate split point
        split_entropy=0.0
        split_info=0.0
        cen=(sorted_values[i]+sorted_values[i+1])/2
discret_values=[ 'L'+str(cen) if (value<cen) else 'H'+str(cen) for value in feature_values]
unique_values=set(discret_values)
        tmp_data=[row[:] for row in dataset]  #copy rows so the original dataset is not modified
for index in range(len(discret_values)):
tmp_data[index][axis]=discret_values[index]
for value in unique_values:
sub_dataset=split_dataset(tmp_data, axis,value)
prob=len(sub_dataset)/float(len(tmp_data))
split_entropy+=prob*entropy(sub_dataset)
split_info-=prob*log(prob,2)
info_gain_ratio = (base_entropy - split_entropy)/split_info
if info_gain_ratio > best_info_gain_ratio:
best_info_gain_ratio=info_gain_ratio
best_discret_split=discret_values
return best_discret_split
#the best feature is the one with the largest info gain ratio (plain info gain in ID3)
def choose_best_feature_split(dataset):
base_entropy=entropy(dataset)
num_of_features=len(dataset[0])-1
best_gain_ratio=0.0 #it's info gain in ID3
best_feature=-1
for i in range(num_of_features):
feature_values=[x[i] for x in dataset]
uni_feature_values=set(feature_values)
new_entropy=0.0
split_info=0.0
for value in uni_feature_values:
sub_dataset=split_dataset(dataset,i,value)
D=len(dataset)
Dj=len(sub_dataset)
prob=Dj/D
split_info -= (Dj/D)*log(Dj/D,2)
new_entropy+=prob*entropy(sub_dataset)
        if split_info==0:
            #a feature with a single value cannot split the dataset
            continue
        gain_ratio=(base_entropy-new_entropy)/split_info
if gain_ratio>best_gain_ratio:
best_gain_ratio=gain_ratio
best_feature=i
return best_feature
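#hedged illustration (not part of the original file): on the toy dataset the
#first feature ('no surfacing') gives the larger gain ratio, so the function
#above should return index 0
def _example_best_feature():
    data, names = create_dataset()
    print(choose_best_feature_split(data))  # expected: 0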
#get the majority count label of a set to represent the set
def majority(label_list):
label_count={}
for vote in label_list:
if vote not in label_count.keys():
label_count[vote] = 0
label_count[vote] += 1
sorted_label_count = sorted(label_count.items(), key=operator.itemgetter(1), reverse=True)
return sorted_label_count[0][0]
#equal to create_tree
def train(dataset,features):
tree=create_tree(dataset,features)
return tree
#recursively create a tree based on entropy
def create_tree(dataset,features):
label_list=[x[-1] for x in dataset]
#process special case
if label_list.count(label_list[0])==len(label_list):
return (label_list[0],str(len(dataset))+'/'+','.join(label_list))
if len(dataset[0])==1:
return (majority(label_list),str(len(dataset))+'/'+','.join(label_list))
index=choose_best_feature_split(dataset)
best_feature=features[index]
my_tree = {best_feature:{}}
info_tree = {best_feature:{}}
    del(features[index])
feature_values = [row[index] for row in dataset]
unique_values = set(feature_values)
for value in unique_values:
sub_features = features[:]
sub_dataset=split_dataset(dataset,index,value)
label_list=[x[-1] for x in sub_dataset]
label_list_string=','.join(label_list)
subtree = create_tree(sub_dataset,sub_features)
my_tree[best_feature][value] = subtree[0]
if type(subtree[0]).__name__ == 'dict':
info_tree[best_feature][str(value)+'/'+str(len(sub_dataset))+'/'+label_list_string]=subtree[1]
else:
info_tree[best_feature][str(value)+'/'+str(len(sub_dataset))+'/'+label_list_string]=subtree[0]
tree = (my_tree,info_tree)
return tree
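#hedged end-to-end sketch (not part of the original file): train on the toy
#dataset and inspect the (value_tree, info_tree) pair returned by create_tree;
#the names list is copied because create_tree deletes used feature names
def _example_train():
    data, names = create_dataset()
    value_tree, info_tree = train(data, names[:])
    print(value_tree)  # e.g. {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}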
def real_error_rate(N,e,CF=0.25):
    """Pessimistic (upper-confidence) error rate used by C4.5 pruning; not implemented in this file."""
    pass
def prune(tree):
    first_key=list(tree.keys())[0]
subtree=tree[first_key]
#all of subtree[key] is leaf node
tree_bottom = all(type(subtree[key]).__name__ != 'dict' for key in subtree.keys())
if tree_bottom==True:
weighted_sum_error=0
subtree_cases=0
subtree_e=0
subtree_class_list=[]
for key in subtree.keys():
            class_list=key.split('/')[2].split(',')
            leaf_class=class_list
subtree_class_list.extend(leaf_class)
N=int(key.split('/')[1])
subtree_cases+=N
leaf_class_labels=subtree[key]
e=N-leaf_class.count(leaf_class_labels)
weighted_sum_error += real_error_rate(N,e)*N
        subtree_e=subtree_cases-subtree_class_list.count(majority(subtree_class_list))
weighted_avg_error = weighted_sum_error/subtree_cases
subtree_error=real_error_rate(subtree_cases,subtree_e)
        if subtree_error < weighted_avg_error or len(subtree_class_list)==1:
            new_class = majority(subtree_class_list)
            return new_class
        else:
            return tree
for key in subtree.keys():
if type(subtree[key]).__name__ == 'dict':
subtree[key] = prune(subtree[key])
return tree
#classify the test sample by the input tree
def classify(tree, test, test_features):
first_key=list(tree.keys())[0]
value_dict=tree[first_key]
#print(firstKey)
#print(featureNames)
feature_index=test_features.index(first_key)
for key in value_dict.keys():
if test[feature_index]==key:
if type(value_dict[key]).__name__=='dict':
class_label= classify(value_dict[key],test,test_features)
else:
class_label=value_dict[key]
return class_label
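#hedged sketch (not part of the original file): classify one sample with a tree
#trained on the toy dataset; [1, 0] means 'no surfacing'=1 and 'flippers'=0
def _example_classify():
    data, names = create_dataset()
    tree, _ = train(data, names[:])
    print(classify(tree, [1, 0], names))  # expected: 'no'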
#based on inputTree and features, classify tests and compare the result with class labels
def test(tree, testset, test_features):
error_count=0.0
num_of_tests=len(testset)
for i in range(num_of_tests):
        result=classify(tree,testset[i],test_features)
        if result != testset[i][-1]:
error_count+=1
error_rate=error_count/num_of_tests
return error_rate
|
|
from typing import Dict, List
from unittest.mock import Mock
from pytest import fixture, raises
from smif.data_layer.store import Store
from smif.decision.decision import DecisionManager, RuleBased
from smif.exception import SmifDataNotFoundError
@fixture(scope='function')
def plan():
planned_interventions = [
{'name': 'small_pumping_station_oxford', 'build_year': 2010},
{'name': 'small_pumping_station_abingdon', 'build_year': 2015},
{'name': 'large_pumping_station_oxford', 'build_year': 2020}
]
return planned_interventions
@fixture(scope='function')
def get_strategies():
strategies = [{'strategy': 'pre-specified-planning',
'description': 'build_nuclear',
'interventions': [
{'name': 'nuclear_large', 'build_year': 2012},
{'name': 'carrington_retire', 'build_year': 2011}]
}]
return strategies
@fixture(scope='function')
def get_register():
lifetime = {'technical_lifetime': {'value': 99}}
register = {'nuclear_large': lifetime,
'carrington_retire': lifetime,
'small_pumping_station_oxford': lifetime,
'small_pumping_station_abingdon': lifetime,
'large_pumping_station_oxford': lifetime
}
return register
class TestRuleBasedProperties:
def test_timesteps(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
assert dm.current_timestep == 2010
assert dm.next_timestep == 2015
def test_timesteps_end(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
dm.current_timestep = 2020
assert dm.next_timestep is None
def test_timesteps_begin(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
dm.current_timestep = 2010
assert dm.previous_timestep is None
def test_timesteps_first_last(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
assert dm.first_timestep == 2010
assert dm.last_timestep == 2020
def test_interventions(self):
all_interventions = {'test_intervention': {'name': 'test_intervention'}}
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, all_interventions)
assert dm.available_interventions([]) == ['test_intervention']
def test_interventions_planned(self):
all_interventions = {'test_intervention': {'name': 'test_intervention'},
'planned_intervention': {'name': 'planned_intervention'}}
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, all_interventions)
actual = dm.available_interventions([{'name': 'planned_intervention'}])
expected = ['test_intervention']
assert actual == expected
def test_get_intervention(self):
interventions = {'a': {'name': 'a'},
'b': {'name': 'b'},
'c': {'name': 'c'}}
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, interventions)
assert dm.get_intervention('a') == interventions['a']
with raises(SmifDataNotFoundError) as ex:
dm.get_intervention('z')
msg = "Intervention 'z' is not found in the list of available interventions"
assert msg in str(ex)
class TestRuleBasedIterationTimestepAccounting:
"""Test that the iteration and timestep accounting methods properly follow
the path through the decision iterations
2010 - 0, 1
2015 - 2, 3
"""
@fixture(scope='function')
def dm(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
return dm
def test_first_iteration_base_year(self, dm):
dm.current_timestep = 2010
dm.current_iteration = 1
dm._max_iteration_by_timestep[2010] = 1
assert dm.get_previous_iteration_timestep() is None
def test_second_iteration_base_year(self, dm):
dm.current_timestep = 2010
dm.current_iteration = 2
dm._max_iteration_by_timestep[2010] = 2
assert dm.get_previous_iteration_timestep() == (2010, 1)
def test_second_iteration_next_year(self, dm):
dm.current_timestep = 2015
dm.current_iteration = 3
dm._max_iteration_by_timestep[2010] = 2
dm._max_iteration_by_timestep[2015] = 3
assert dm.get_previous_iteration_timestep() == (2010, 2)
def test_third_iteration_next_year(self, dm):
dm.current_timestep = 2015
dm.current_iteration = 4
dm._max_iteration_by_timestep[2010] = 2
dm._max_iteration_by_timestep[2015] = 4
assert dm.get_previous_iteration_timestep() == (2015, 3)
class TestRuleBased:
def test_initialisation(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
assert dm.timesteps == timesteps
assert dm.satisfied is False
assert dm.current_timestep == 2010
assert dm.current_iteration == 0
def test_generator(self):
timesteps = [2010, 2015, 2020]
dm = RuleBased(timesteps, Mock())
actual = next(dm)
assert actual == {
'decision_iterations': [1],
'timesteps': [2010]
}
dm.satisfied = True
actual = next(dm)
assert actual == {
'decision_iterations': [2],
'timesteps': [2015],
'decision_links': {
2: 1
}
}
assert dm.satisfied is False
actual = next(dm)
assert actual == {
'decision_iterations': [3],
'timesteps': [2015],
'decision_links': {
3: 1
}
}
assert dm.satisfied is False
dm.satisfied = True
dm.current_timestep = 2020
actual = next(dm)
assert actual is None
class TestDecisionManager():
@fixture(scope='function')
def decision_manager(self, empty_store) -> DecisionManager:
empty_store.write_model_run({'name': 'test', 'sos_model': 'test_sos_model'})
empty_store.write_sos_model({'name': 'test_sos_model', 'sector_models': []})
empty_store.write_strategies('test', [])
sos_model = Mock()
sos_model.name = 'test_sos_model'
sos_model.sector_models = []
df = DecisionManager(empty_store, [2010, 2015], 'test', sos_model)
return df
def test_decision_manager_init(self, decision_manager: DecisionManager):
df = decision_manager
dm = df.decision_loop()
bundle = next(dm)
assert bundle == {
'decision_iterations': [0],
'timesteps': [2010, 2015]
}
with raises(StopIteration):
next(dm)
def test_available_interventions(self, decision_manager: DecisionManager):
df = decision_manager
df._register = {'a': {'name': 'a'},
'b': {'name': 'b'},
'c': {'name': 'c'}}
assert df.available_interventions == df._register
df.planned_interventions = {(2010, 'a'), (2010, 'b')}
expected = {'c': {'name': 'c'}}
assert df.available_interventions == expected
def test_get_intervention(self, decision_manager: DecisionManager):
df = decision_manager
df._register = {'a': {'name': 'a'},
'b': {'name': 'b'},
'c': {'name': 'c'}}
assert df.get_intervention('a') == {'name': 'a'}
with raises(SmifDataNotFoundError):
df.get_intervention('z')
def test_buildable(self, decision_manager):
decision_manager._timesteps = [2010, 2015]
assert decision_manager.buildable(2010, 2010) is True
assert decision_manager.buildable(2011, 2010) is True
def test_historical_intervention_buildable(self, decision_manager):
decision_manager._timesteps = [2020, 2030]
assert decision_manager.buildable(1980, 2020) is True
assert decision_manager.buildable(1990, 2020) is True
def test_buildable_raises(self, decision_manager):
with raises(ValueError):
decision_manager.buildable(2015, 2014)
def test_within_lifetime(self, decision_manager):
assert decision_manager.within_lifetime(2010, 2010, 1)
def test_within_lifetime_does_not_check_start(self, decision_manager):
"""Note that the ``within_lifetime`` method does not check
that the build year is compatible with timestep
"""
assert decision_manager.within_lifetime(2011, 2010, 1)
def test_negative_lifetime_raises(self, decision_manager):
with raises(ValueError):
decision_manager.within_lifetime(2010, 2010, -1)
class TestDecisionManagerDecisions:
@fixture(scope='function')
def decision_manager(self, empty_store) -> DecisionManager:
empty_store.write_model_run({'name': 'test', 'sos_model': 'test_sos_model'})
empty_store.write_sos_model({'name': 'test_sos_model', 'sector_models': []})
empty_store.write_strategies('test', [])
sos_model = Mock()
sos_model.name = 'test_sos_model'
sos_model.sector_models = []
interventions = {'test': {'technical_lifetime': {'value': 99}},
'planned': {'technical_lifetime': {'value': 99}},
'decided': {'technical_lifetime': {'value': 99}}
}
df = DecisionManager(empty_store, [2010, 2015], 'test', sos_model)
df._register = interventions
return df
def test_get_decisions(self, decision_manager: DecisionManager):
dm = decision_manager
mock_handle = Mock()
dm._decision_module = Mock()
dm._decision_module.get_decision = Mock(
return_value=[{'name': 'test', 'build_year': 2010}])
actual = dm._get_decisions(dm._decision_module, mock_handle)
expected = [(2010, 'test')]
assert actual == expected
def test_get_and_save_decisions_dm(self, decision_manager: DecisionManager):
"""Test that the ``get_and_save_decisions`` method updates pre-decision
state with a new decision and writes it to store
"""
dm = decision_manager
dm._decision_module = Mock()
dm._decision_module.get_decision = Mock(
return_value=[{'name': 'decided', 'build_year': 2010}])
dm._decision_module.get_previous_state = Mock(return_value=[])
dm.get_and_save_decisions(0, 2010)
actual = dm._store # type: Store
expected = [{'name': 'decided', 'build_year': 2010}] # type: List[Dict]
assert actual.read_state('test', 2010, decision_iteration=0) == expected
def test_get_and_save_decisions_prespec(self,
decision_manager: DecisionManager):
"""Test that the ``get_and_save_decisions`` method updates pre-decision
state with a pre-specified planning and writes it to store
"""
dm = decision_manager
dm.planned_interventions = [(2010, 'planned')]
dm.get_and_save_decisions(0, 2010)
actual = dm._store # type: Store
expected = [{'name': 'planned', 'build_year': 2010}] # type: List[Dict]
assert actual.read_state('test', 2010, decision_iteration=0) == expected
def test_pre_spec_and_decision_module(self,
decision_manager: DecisionManager):
dm = decision_manager
dm._decision_module = Mock()
dm._decision_module.get_decision = Mock(
return_value=[{'name': 'decided', 'build_year': 2010}])
dm._decision_module.get_previous_state = Mock(return_value=[])
dm.planned_interventions = [(2010, 'planned')]
dm.get_and_save_decisions(0, 2010)
actual = dm._store.read_state('test', 2010, decision_iteration=0) # type: List[Dict]
expected = set([('decided', 2010), ('planned', 2010)])
assert set([(x['name'], x['build_year']) for x in actual]) == expected
|
|
from mininet.topo import *
from scipy.stats import truncnorm, tstd, poisson, expon
from numpy.random import randint, uniform
from datetime import datetime
from time import time
import os
import sys
import signal
LATENCY_METRIC_MIN_AVERAGE_DELAY = 1
LATENCY_METRIC_MIN_MAXIMUM_DELAY = 2
# MEDIA_DURATION_SECONDS = 70
class ReceiverLogStats(object):
def __init__(self, filename, recv_bytes, recv_packets, lost_packets):
self.filename = filename
self.recv_bytes = int(recv_bytes)
self.recv_packets = int(recv_packets)
self.lost_packets = int(lost_packets)
def debug_print(self):
print 'Multicast Receiver Log: ' + str(self.filename)
print 'RecvBytes: ' + str(self.recv_bytes) + ' RecvPackets: ' + str(self.recv_packets) + ' LostPackets: ' + str(self.lost_packets)
class MulticastReceiverApplication(object):
APP_STATE_PRELAUNCH = 1
APP_STATE_RUNNING = 2
APP_STATE_COMPLETE = 3
def __init__(self, host, group_ip, mcast_port, echo_port, init_time, service_time):
self.host = host
self.group_ip = group_ip
self.mcast_port = mcast_port
self.echo_port = echo_port
self.init_time = init_time
self.service_time = service_time
self.terminate_time = init_time + service_time
self.log_filename = 'mcastlog_' + str(self.group_ip.replace('.', '_')) + '_' + str(host) + '_' + str(randint(0,sys.maxint)) + '.log'
self.app_process = None
self.log_stats = None
self.app_state = MulticastReceiverApplication.APP_STATE_PRELAUNCH
def launch_receiver_application(self):
if self.app_state == MulticastReceiverApplication.APP_STATE_PRELAUNCH and self.app_process is None:
with open(os.devnull, "w") as fnull:
vlc_rcv_command = ['python', './multicast_receiver_VLC.py', self.group_ip, str(self.mcast_port), str(self.echo_port), str(self.log_filename)]
# print 'Running: ' + ' '.join(vlc_rcv_command)
self.app_process = self.host.popen(vlc_rcv_command, stdout=fnull, stderr=fnull, close_fds=True, shell=False)
self.app_state = MulticastReceiverApplication.APP_STATE_RUNNING
def terminate_receiver_application(self):
if self.app_state == MulticastReceiverApplication.APP_STATE_RUNNING and self.app_process is not None:
# Terminate the application
self.app_process.send_signal(signal.SIGINT)
self.app_state = MulticastReceiverApplication.APP_STATE_COMPLETE
def read_log_stats(self):
if self.app_process is not None:
self.app_process.wait()
self.app_process = None
if self.log_stats is None:
# Read the application's log file and record relevant stats
try:
log_file = open(self.log_filename, 'r')
# print 'Reading log file: ' + str(self.log_filename)
for line in log_file:
if 'RecvPackets:' in line:
line_split = line.split(' ')
recv_packets = line_split[0][len('RecvPackets:'):]
recv_bytes = line_split[1][len('RecvBytes:'):]
lost_packets = line_split[2][len('LostPackets:'):]
print str(self) + ' Recv:' + str(recv_packets) + ' Lost:' + str(lost_packets),
self.log_stats = ReceiverLogStats(str(self.log_filename), recv_bytes, recv_packets, lost_packets)
# self.log_stats.debug_print()
break
log_file.close()
# print 'Read log file: ' + str(self.log_filename)
# Remove the application log file
os.remove(self.log_filename)
except IOError as e:
print 'WARNING: Log file ' + str(self.log_filename) + ' was not found. (App service time: ' + str(self.service_time) + ')'
self.log_stats = ReceiverLogStats(str(self.log_filename), 0, 0, 0)
def get_recv_packets(self):
if self.log_stats is None and self.app_state == MulticastReceiverApplication.APP_STATE_COMPLETE:
self.read_log_stats()
if self.log_stats is None:
return 0
else:
return int(self.log_stats.recv_packets)
def get_lost_packets(self):
if self.log_stats is None and self.app_state == MulticastReceiverApplication.APP_STATE_COMPLETE:
self.read_log_stats()
if self.log_stats is None:
return 0
else:
return int(self.log_stats.lost_packets)
def get_app_state(self):
return self.app_state
def __str__(self):
return 'Recv-' + str(self.group_ip) + '-' + str(self.host)
class DynamicMulticastGroupDefinition(object):
EVENT_RECEIVER_INIT = 'Recv_Init'
EVENT_RECEIVER_TERMINATION = 'Recv_Term'
def __init__(self, net_hosts, group_ip, mcast_port, echo_port):
self.net_hosts = net_hosts
self.group_ip = group_ip
self.mcast_port = mcast_port
self.echo_port = echo_port
self.src_process = None
self.receiver_applications = []
self.event_list = None
self.past_event_list = None
self.trial_start_time = 0
def generate_receiver_events(self, trial_start_time, trial_duration_seconds, num_receivers_at_time_zero, arrival_rate, service_rate):
"""Generates receiver init and termination events.
        Receiver initialization events are generated as a Poisson process with arrival rate: arrival_rate (in receivers per second).
Each receiver has an exponential service time with rate: service_rate.
This should be called at the start of a simulation run, just after initialization of mininet.
"""
if self.event_list is not None:
return
self.event_list = []
self.past_event_list = []
self.trial_start_time = trial_start_time
        # First, generate the initial receivers (active at time 0)
for i in range(0, num_receivers_at_time_zero):
# Generate a service time
service_time = expon(loc = 0, scale=(1.0 / service_rate)).rvs(1)[0]
# Select a host through a uniform random distribution
receiver = self.net_hosts[randint(0,len(self.net_hosts))]
receiver = MulticastReceiverApplication(receiver, self.group_ip, self.mcast_port, self.echo_port, trial_start_time, service_time)
self.receiver_applications.append(receiver)
self.event_list.append((trial_start_time, DynamicMulticastGroupDefinition.EVENT_RECEIVER_INIT, receiver))
self.event_list.append((trial_start_time + service_time, DynamicMulticastGroupDefinition.EVENT_RECEIVER_TERMINATION, receiver))
# Alternative: Generate inter-arrival times using exponential distribution
arrival_times = []
expo_rv = expon(loc = 0, scale=(1.0 / arrival_rate))
last_arrival_time = 0
while last_arrival_time < trial_duration_seconds:
next_arrival_time = expo_rv.rvs(1)[0] + last_arrival_time
if next_arrival_time < trial_duration_seconds:
arrival_times.append(next_arrival_time)
last_arrival_time = next_arrival_time
# Alternative Method
# Find the number of arrivals in the interval [0, trial_duration_seconds]
# Size = trial_duration_seconds, since we want the number of arrivals in trial_duration_seconds time units
# num_arrivals = sum(poisson.rvs(arrival_rate, size=trial_duration_seconds))
# Once the number of arrivals is known, generate arrival times uniform on [0, trial_duration_seconds]
# arrival_times = []
# for i in range(0, num_arrivals):
# arrival_times.append(uniform(0, trial_duration_seconds))
# Now, for each arrival, generate a corresponding receiver application and events
for arrival_time in arrival_times:
# Generate a service time
service_time = expon(loc = 0, scale=(1.0 / service_rate)).rvs(1)[0]
# Select a host through a uniform random distribution
receiver = self.net_hosts[randint(0,len(self.net_hosts))]
receiver = MulticastReceiverApplication(receiver, self.group_ip, self.mcast_port, self.echo_port, trial_start_time + arrival_time, service_time)
self.receiver_applications.append(receiver)
self.event_list.append((trial_start_time + arrival_time, DynamicMulticastGroupDefinition.EVENT_RECEIVER_INIT, receiver))
self.event_list.append((trial_start_time + arrival_time + service_time, DynamicMulticastGroupDefinition.EVENT_RECEIVER_TERMINATION, receiver))
# Sort the event list by time
self.event_list = sorted(self.event_list, key=lambda tup: tup[0])
# Debug printing
for event in self.event_list:
print 'Time:' + str(event[0]) + ' ' + str(event[1]) + ' ' + str(event[2])
def launch_sender_application(self):
"""Launches the group sender application.
This should be called at the start of a simulation run, after mininet is initialized.
"""
if self.src_process is None:
with open(os.devnull, "w") as fnull:
# seek_time = str(int(uniform(0, MEDIA_DURATION_SECONDS)))
# print 'Starting sender for group ' + str(self.group_ip) + ' with seek offset: ' + seek_time + ' seconds.'
# vlc_command = ['vlc-wrapper', 'test_media.mp4', '-I', 'dummy', '--sout', '"#rtp{access=udp, mux=ts, proto=udp, dst=' + self.group_ip + ', port=' + str(self.mcast_port) + '}"', '--sout-keep', '--loop', '--start-time', seek_time]
vlc_command = ['vlc-wrapper', 'test_media.mp4', '-I', 'dummy', '--sout', '"#rtp{access=udp, mux=ts, proto=udp, dst=' + self.group_ip + ', port=' + str(self.mcast_port) + '}"', '--sout-keep', '--loop']
sender = self.net_hosts[randint(0,len(self.net_hosts))]
print 'Sending host for group ' + str(self.group_ip) + ': ' + str(sender)
print 'Running VLC launch command: ' + ' '.join(vlc_command)
self.src_process = sender.popen(' '.join(vlc_command), stdout=fnull, stderr=fnull, close_fds=True, shell=True)
def update_receiver_applications(self, current_time):
"""Launches/terminates receiver applications as specified by the current time and the event_list attribute."""
while len(self.event_list) > 0 and self.event_list[0][0] <= current_time:
event = self.event_list.pop(0)
if event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_INIT:
print 'LAUNCH: Receiver ' + str(event[2]) + ' at time: ' + str(event[0]) + ' (Sim time: ' + str(event[0] - self.trial_start_time) + ')'
event[2].launch_receiver_application()
elif event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_TERMINATION:
event[2].terminate_receiver_application()
print 'TERMINATE: Receiver ' + str(event[2]) + ' at time: ' + str(event[0]) + ' (Sim time: ' + str(event[0] - self.trial_start_time) + ')'
# print 'Service Time: ' + str(time() - event[2].init_time)
self.past_event_list.append(event)
def terminate_group(self):
"""Terminates the sender application, as well as any receiver applications which are currently running.
This should be called at the end of a simulation run, before terminating mininet.
"""
if self.src_process is not None:
# print 'Killing process with PID: ' + str(self.src_process.pid)
self.src_process.terminate()
self.src_process.kill()
# TODO: Kill any receivers still running
for recv_app in self.receiver_applications:
if recv_app.app_state == MulticastReceiverApplication.APP_STATE_RUNNING:
recv_app.terminate_receiver_application()
def get_total_recv_packets(self):
return sum(recv_app.get_recv_packets() for recv_app in self.receiver_applications)
def get_total_lost_packets(self):
return sum(recv_app.get_lost_packets() for recv_app in self.receiver_applications)
def get_next_receiver_event(self):
"""Returns the receiver event at the head of the event list (or None if the list is empty)."""
if len(self.event_list) > 0:
return self.event_list[0]
else:
return None
def get_num_active_receivers(self, time):
"""Returns the number of receivers active at the provided time."""
num_receivers = 0
for event in self.event_list:
if event[0] <= time and event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_INIT:
num_receivers += 1
if event[0] <= time and event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_TERMINATION:
num_receivers -= 1
for event in self.past_event_list:
if event[0] <= time and event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_INIT:
num_receivers += 1
if event[0] <= time and event[1] == DynamicMulticastGroupDefinition.EVENT_RECEIVER_TERMINATION:
num_receivers -= 1
return num_receivers
def write_dynamic_stats_log(log_path, flow_stats_file_path, event_log_file_path, test_groups, topography, recv_arrival_rate,
recv_service_rate, num_init_receivers, trial_start_time, trial_end_time, stats_interval):
def write_current_time_interval(log_file, test_groups, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, time_index, current_time, trial_start_time):
link_bandwidth_list = []
total_num_flows = 0
for switch_dpid in link_bandwidth_usage_Mbps:
for port_no in link_bandwidth_usage_Mbps[switch_dpid]:
link_bandwidth_list.append(link_bandwidth_usage_Mbps[switch_dpid][port_no])
for switch_dpid in switch_num_flows:
total_num_flows += switch_num_flows[switch_dpid]
net_wide_avg_load = 0
for switch_dpid in switch_average_load:
net_wide_avg_load += switch_average_load[switch_dpid]
net_wide_avg_load = float(net_wide_avg_load) / len(switch_average_load)
avg_response_time = sum(response_times) / float(len(response_times))
avg_network_time = sum(network_times) / float(len(network_times))
avg_processing_time = sum(processing_times) / float(len(processing_times))
average_link_bandwidth_usage = sum(link_bandwidth_list) / float(len(link_bandwidth_list))
traffic_concentration = 0
if average_link_bandwidth_usage != 0:
traffic_concentration = max(link_bandwidth_list) / average_link_bandwidth_usage
link_util_std_dev = tstd(link_bandwidth_list)
num_receivers = 0
for group in test_groups:
num_receivers += group.get_num_active_receivers(current_time)
log_file.write('TimeIndex:' + str(time_index) + ' SimTime:' + str(current_time - trial_start_time))
log_file.write(' TotalNumFlows:' + str(total_num_flows))
log_file.write(' MaxLinkUsageMbps:' + str(max(link_bandwidth_list)))
log_file.write(' AvgLinkUsageMbps:' + str(average_link_bandwidth_usage))
log_file.write(' TrafficConcentration:' + str(traffic_concentration))
log_file.write(' LinkUsageStdDev:' + str(link_util_std_dev))
log_file.write(' ResponseTime:' + str(avg_response_time))
log_file.write(' NetworkTime:' + str(avg_network_time))
log_file.write(' ProcessingTime:' + str(avg_processing_time))
log_file.write(' SwitchAvgLoadMbps:' + str(net_wide_avg_load))
log_file.write(' NumActiveReceivers:' + str(num_receivers))
log_file.write('\n')
switch_num_flows = {} # Dictionary of number of currently installed flows, keyed by switch_dpid
switch_average_load = {} # Dictionary of switch average load, keyed by switch_dpid
link_bandwidth_usage_Mbps = {} # Dictionary of dictionaries: link_bandwidth_usage_Mbps[switch_dpid][port_no]
cur_switch_dpid = None # Stores the DPID of the switch for which statistics are currently being read
# Generate a list of time points for which statistics should be recorded
next_time_interval = trial_start_time + stats_interval
time_intervals = []
while next_time_interval < trial_end_time:
time_intervals.append(next_time_interval)
next_time_interval = next_time_interval + stats_interval
print 'Recording statistics for ' + str(len(time_intervals)) + ' time intervals.'
cur_time_interval_index = 0
cur_time = 0
# Calculate packet loss and receivers at trial start
recv_packets = 0
lost_packets = 0
num_receievers_at_start = 0
for group in test_groups:
recv_packets += group.get_total_recv_packets()
lost_packets += group.get_total_lost_packets()
num_receievers_at_start += group.get_num_active_receivers(trial_start_time)
packet_loss = 0
if recv_packets + lost_packets != 0:
packet_loss = (float(lost_packets) / (recv_packets + lost_packets)) * 100
# Write out scenario params, and any statistics which cover the entire trial duration (ex. packet loss)
final_log_file = open(log_path, 'w')
final_log_file.write('GroupFlow Performance Simulation: ' + str(datetime.now()) + '\n')
final_log_file.write('FlowStatsLogFile:' + str(flow_stats_file_path) + '\n')
final_log_file.write('EventTraceLogFile:' + str(event_log_file_path) + '\n')
final_log_file.write('TrialStartTime:' + str(trial_start_time) + ' TrialEndTime:' + str(trial_end_time) + ' TrialDuration:' + str(trial_end_time - trial_start_time) + '\n')
final_log_file.write('NumberOfGroups:' + str(len(test_groups)) + ' InitReceiversPerGroup:' + str(num_init_receivers) + ' ReceiverArrivalRate:' + str(recv_arrival_rate)
+ ' ReceiverServiceRate:' + str(recv_service_rate) + ' TotalInitReceivers:' + str(num_receievers_at_start) + '\n')
final_log_file.write('Topology:' + str(topography) + ' NumSwitches:' + str(len(topography.switches())) + ' NumLinks:' + str(len(topography.links())) + ' NumHosts:' + str(len(topography.hosts())) + '\n')
final_log_file.write('RecvPackets:' + str(recv_packets) + ' LostPackets:' + str(lost_packets) + ' AvgPacketLoss:' + str(packet_loss) + '\n\n')
flow_log_file = open(flow_stats_file_path, 'r')
response_times = []
network_times = []
processing_times = []
for line in flow_log_file:
# This line specifies that start of stats for a new switch and time instant
if 'PortStats' in line:
line_split = line.split()
switch_dpid = line_split[1][len('Switch:'):]
num_flows = int(line_split[2][len('NumFlows:'):])
cur_time = float(line_split[4][len('IntervalEndTime:'):])
response_time = float(line_split[5][len('ResponseTime:'):])
network_time = float(line_split[6][len('NetworkTime:'):])
processing_time = float(line_split[7][len('ProcessingTime:'):])
avg_load = float(line_split[8][len('AvgSwitchLoad:'):])
cur_switch_dpid = switch_dpid
# First, check to see if this time falls under a new statistics interval, and record the current stats to file if so
if cur_time_interval_index < len(time_intervals) and cur_time > time_intervals[cur_time_interval_index]:
cur_time_interval_index += 1
write_current_time_interval(final_log_file, test_groups, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, cur_time_interval_index - 1,
time_intervals[cur_time_interval_index - 1], trial_start_time)
response_times = []
network_times = []
processing_times = []
response_times.append(response_time)
network_times.append(network_time)
processing_times.append(processing_time)
switch_num_flows[cur_switch_dpid] = num_flows
switch_average_load[cur_switch_dpid] = avg_load
# This line specifies port specific stats for the last referenced switch
if 'PSPort' in line:
line_split = line.split()
port_no = int(line_split[0][len('PSPort:'):])
bandwidth_usage = float(line_split[3][len('AvgBandwidth:'):])
if(port_no == 65533):
# Ignore connections to the controller for these calculations
continue
if cur_switch_dpid not in link_bandwidth_usage_Mbps:
link_bandwidth_usage_Mbps[cur_switch_dpid] = {}
link_bandwidth_usage_Mbps[cur_switch_dpid][port_no] = bandwidth_usage
# Print the stats for the final multicast group
# write_current_time_interval(final_log_file, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, cur_group_index - 1, test_groups[cur_group_index - 1])
flow_log_file.close()
final_log_file.close()
class StaticMulticastGroupDefinition(object):
"""Class used to manage the launch and termination of a single group of multicast applications with static membership.
Multicast groups managed by this class have the following properties:
* Each group has a single sender, which is an instance of VLC streaming a file named "test_media.mp4" over UDP
* The group may have an arbitrary number of receivers, which are all instances of multicast_receiver_VLC.py
* The group sender and all receivers are all initialized at the same time, and are all terminated at the same time
"""
def __init__(self, src_host, dst_hosts, group_ip, mcast_port, echo_port):
self.src_host = src_host
self.dst_hosts = dst_hosts
self.group_ip = group_ip
self.mcast_port = mcast_port
self.echo_port = echo_port
self.receiver_log_files = [] # Stores filenames
self.receiver_log_stats = [] # Stores ReceiverLogStats objects
self.src_process = None
self.dst_processes = []
def launch_mcast_applications(self, net):
# print 'Initializing multicast group ' + str(self.group_ip) + ':' + str(self.mcast_port) + ' Echo port: ' + str(self.echo_port)
with open(os.devnull, "w") as fnull:
# self.src_process = net.get(self.src_host).popen(['python', './multicast_sender.py', self.group_ip, str(self.mcast_port), str(self.echo_port)], stdout=fnull, stderr=fnull, close_fds=True)
vlc_command = ['vlc-wrapper', 'test_media.mp4', '-I', 'dummy', '--sout', '"#rtp{access=udp, mux=ts, proto=udp, dst=' + self.group_ip + ', port=' + str(self.mcast_port) + '}"', '--sout-keep', '--loop']
# print 'Running: ' + ' '.join(vlc_command)
self.src_process = net.get(self.src_host).popen(' '.join(vlc_command), stdout=fnull, stderr=fnull, close_fds=True, shell=True)
for dst in self.dst_hosts:
recv_log_filename = 'mcastlog_' + str(self.group_ip.replace('.', '_')) + '_' + str(dst) + '.log'
with open(os.devnull, "w") as fnull:
# self.dst_processes.append(net.get(dst).popen(['python', './multicast_receiver.py', self.group_ip, str(self.mcast_port), str(self.echo_port)], stdout=fnull, stderr=fnull, close_fds=True))
vlc_rcv_command = ['python', './multicast_receiver_VLC.py', self.group_ip, str(self.mcast_port), str(self.echo_port), str(recv_log_filename)]
# print 'Running: ' + ' '.join(vlc_rcv_command)
self.dst_processes.append(net.get(dst).popen(vlc_rcv_command, stdout=fnull, stderr=fnull, close_fds=True, shell=False))
self.receiver_log_files.append(recv_log_filename)
print('Initialized multicast group ' + str(self.group_ip) + ':' + str(self.mcast_port)
+ ' Echo port: ' + str(self.echo_port) + ' # Receivers: ' + str(len(self.dst_processes)))
def terminate_mcast_applications(self):
if self.src_process is not None:
# print 'Killing process with PID: ' + str(self.src_process.pid)
# os.killpg(self.src_process.pid, signal.SIGTERM)
self.src_process.terminate()
self.src_process.kill()
for proc in self.dst_processes:
# print 'Killing process with PID: ' + str(proc.pid)
proc.send_signal(signal.SIGINT)
# proc.terminate()
# proc.kill()
print 'Signaled termination of multicast group ' + str(self.group_ip) + ':' + str(self.mcast_port) + ' Echo port: ' + str(self.echo_port)
def wait_for_application_termination(self):
if self.src_process is not None:
self.src_process.wait()
self.src_process = None
for proc in self.dst_processes:
proc.wait()
for filename in self.receiver_log_files:
log_file = open(filename, 'r')
for line in log_file:
if 'RecvPackets:' in line:
line_split = line.split(' ')
recv_packets = int(line_split[0][len('RecvPackets:'):])
recv_bytes = int(line_split[1][len('RecvBytes:'):])
lost_packets = int(line_split[2][len('LostPackets:'):])
log_stats = ReceiverLogStats(str(filename), recv_bytes, recv_packets, lost_packets)
#log_stats.debug_print()
self.receiver_log_stats.append(log_stats)
break
log_file.close()
# print 'Read ' + filename
os.remove(filename)
self.dst_processes = []
def get_total_recv_packets(self):
return sum(log.recv_packets for log in self.receiver_log_stats)
def get_total_lost_packets(self):
return sum(log.lost_packets for log in self.receiver_log_stats)
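# Illustrative usage sketch (an assumption, not part of the original script): a test
# run would typically build one of these group definitions per multicast group,
# launch it against a running Mininet instance `net`, and tear it down before
# reading the receiver statistics. The host names, group address and ports below
# are hypothetical examples.
#
#   group = StaticMulticastGroupDefinition('h1', ['h2', 'h3'], '224.1.1.1', 5007, 5008)
#   group.launch_mcast_applications(net)
#   ...
#   group.terminate_mcast_applications()
#   group.wait_for_application_termination()
#   print group.get_total_recv_packets(), group.get_total_lost_packets()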
def generate_group_membership_probabilities(hosts, mean, std_dev, avg_group_size = 0):
num_hosts = len(hosts)
    a, b = (0 - mean) / std_dev, (1 - mean) / std_dev
midpoint_ab = (b + a) / 2
scale = 1 / (b - a)
location = 0.5 - (midpoint_ab * scale)
rv = truncnorm(a, b, loc=location, scale=scale)
rvs = rv.rvs(num_hosts)
if avg_group_size > 0:
rvs_sum = sum(rvs)
rvs = [p / (rvs_sum/float(avg_group_size)) for p in rvs]
rvs_sum = sum(rvs)
rvs = [p / (rvs_sum/float(avg_group_size)) for p in rvs]
prob_tuples = []
for index, host in enumerate(hosts):
prob_tuples.append((host, rvs[index]))
return prob_tuples
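# Illustrative sketch (not part of the original script): the (host, probability)
# tuples returned above can be sampled to decide which hosts join a group; the
# host list and the use of `random` are assumptions for the example.
#
#   import random
#   hosts = ['h1', 'h2', 'h3', 'h4', 'h5']
#   membership = generate_group_membership_probabilities(hosts, 0.1, 0.25, avg_group_size=2)
#   receivers = [host for host, prob in membership if random.random() < prob]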
def write_final_stats_log(final_log_path, flow_stats_file_path, event_log_file_path, membership_mean, membership_std_dev, membership_avg_bound, test_groups, group_launch_times, topography):
def write_current_stats(log_file, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, cur_group_index, group):
link_bandwidth_list = []
total_num_flows = 0
for switch_dpid in link_bandwidth_usage_Mbps:
for port_no in link_bandwidth_usage_Mbps[switch_dpid]:
link_bandwidth_list.append(link_bandwidth_usage_Mbps[switch_dpid][port_no])
for switch_dpid in switch_num_flows:
total_num_flows += switch_num_flows[switch_dpid]
net_wide_avg_load = 0
for switch_dpid in switch_average_load:
net_wide_avg_load += switch_average_load[switch_dpid]
net_wide_avg_load = float(net_wide_avg_load) / len(switch_average_load)
avg_response_time = sum(response_times) / float(len(response_times))
avg_network_time = sum(network_times) / float(len(network_times))
avg_processing_time = sum(processing_times) / float(len(processing_times))
average_link_bandwidth_usage = sum(link_bandwidth_list) / float(len(link_bandwidth_list))
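        # Traffic concentration: ratio of the most heavily loaded link to the
        # average link load (1.0 means traffic is spread perfectly evenly).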
traffic_concentration = 0
if average_link_bandwidth_usage != 0:
traffic_concentration = max(link_bandwidth_list) / average_link_bandwidth_usage
link_util_std_dev = tstd(link_bandwidth_list)
log_file.write('Group:' + str(cur_group_index))
log_file.write(' NumReceivers:' + str(len(group.dst_hosts)))
log_file.write(' TotalNumFlows:' + str(total_num_flows))
log_file.write(' MaxLinkUsageMbps:' + str(max(link_bandwidth_list)))
log_file.write(' AvgLinkUsageMbps:' + str(average_link_bandwidth_usage))
log_file.write(' TrafficConcentration:' + str(traffic_concentration))
log_file.write(' LinkUsageStdDev:' + str(link_util_std_dev))
log_file.write(' ResponseTime:' + str(avg_response_time))
log_file.write(' NetworkTime:' + str(avg_network_time))
log_file.write(' ProcessingTime:' + str(avg_processing_time))
log_file.write(' SwitchAvgLoadMbps:' + str(net_wide_avg_load))
log_file.write('\n')
switch_num_flows = {} # Dictionary of number of currently installed flows, keyed by switch_dpid
switch_average_load = {} # Dictionary of switch average load, keyed by switch_dpid
link_bandwidth_usage_Mbps = {} # Dictionary of dictionaries: link_bandwidth_usage_Mbps[switch_dpid][port_no]
cur_group_index = 0
cur_time = 0
cur_switch_dpid = None
final_log_file = open(final_log_path, 'w')
# Write out scenario params
num_receivers_list = []
for group in test_groups:
num_receivers_list.append(len(group.dst_hosts))
avg_num_receivers = sum(num_receivers_list) / float(len(num_receivers_list))
# Calculate packet loss
recv_packets = 0
lost_packets = 0
for group in test_groups:
recv_packets += group.get_total_recv_packets()
lost_packets += group.get_total_lost_packets()
packet_loss = 0
if recv_packets + lost_packets != 0:
packet_loss = (float(lost_packets) / (recv_packets + lost_packets)) * 100
final_log_file.write('GroupFlow Performance Simulation: ' + str(datetime.now()) + '\n')
final_log_file.write('FlowStatsLogFile:' + str(flow_stats_file_path) + '\n')
final_log_file.write('EventTraceLogFile:' + str(event_log_file_path) + '\n')
final_log_file.write('Membership Mean:' + str(membership_mean) + ' StdDev:' + str(membership_std_dev) + ' AvgBound:' + str(membership_avg_bound) + ' NumGroups:' + str(len(test_groups) - 1) + ' AvgNumReceivers:' + str(avg_num_receivers) + '\n')
final_log_file.write('Topology:' + str(topography) + ' NumSwitches:' + str(len(topography.switches())) + ' NumLinks:' + str(len(topography.links())) + ' NumHosts:' + str(len(topography.hosts())) + '\n')
final_log_file.write('RecvPackets:' + str(recv_packets) + ' LostPackets:' + str(lost_packets) + ' AvgPacketLoss:' + str(packet_loss) + '\n\n')
flow_log_file = open(flow_stats_file_path, 'r')
response_times = []
network_times = []
processing_times = []
for line in flow_log_file:
        # This line specifies the start of stats for a new switch and time instant
if 'PortStats' in line:
line_split = line.split()
switch_dpid = line_split[1][len('Switch:'):]
num_flows = int(line_split[2][len('NumFlows:'):])
cur_time = float(line_split[4][len('IntervalEndTime:'):])
response_time = float(line_split[5][len('ResponseTime:'):])
network_time = float(line_split[6][len('NetworkTime:'):])
processing_time = float(line_split[7][len('ProcessingTime:'):])
avg_load = float(line_split[8][len('AvgSwitchLoad:'):])
cur_switch_dpid = switch_dpid
# print 'Got stats for switch: ' + str(switch_dpid)
# print 'Cur Time: ' + str(cur_time) + ' Next Group Launch: ' + str(group_launch_times[cur_group_index])
# First, check to see if a new group has been initialized before this time, and log the current flow stats if so
if cur_group_index < len(group_launch_times) and cur_time > group_launch_times[cur_group_index]:
cur_group_index += 1
if(cur_group_index > 1):
write_current_stats(final_log_file, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, cur_group_index - 2, test_groups[cur_group_index - 2])
response_times = []
network_times = []
processing_times = []
response_times.append(response_time)
network_times.append(network_time)
processing_times.append(processing_time)
switch_num_flows[cur_switch_dpid] = num_flows
switch_average_load[cur_switch_dpid] = avg_load
# This line specifies port specific stats for the last referenced switch
if 'PSPort' in line:
line_split = line.split()
port_no = int(line_split[0][len('PSPort:'):])
bandwidth_usage = float(line_split[3][len('AvgBandwidth:'):])
if(port_no == 65533):
# Ignore connections to the controller for these calculations
continue
if cur_switch_dpid not in link_bandwidth_usage_Mbps:
link_bandwidth_usage_Mbps[cur_switch_dpid] = {}
link_bandwidth_usage_Mbps[cur_switch_dpid][port_no] = bandwidth_usage
# Print the stats for the final multicast group
write_current_stats(final_log_file, link_bandwidth_usage_Mbps, switch_num_flows, switch_average_load, response_times, cur_group_index - 1, test_groups[cur_group_index - 1])
flow_log_file.close()
final_log_file.close()
class BriteTopo(Topo):
def __init__(self, brite_filepath):
# Initialize topology
Topo.__init__( self )
self.hostnames = []
self.switch_names = []
self.routers = []
self.edges = []
self.file_path = brite_filepath
print 'Parsing BRITE topology at filepath: ' + str(brite_filepath)
file = open(brite_filepath, 'r')
line = file.readline()
print 'BRITE ' + line
# Skip ahead until the nodes section is reached
in_node_section = False
while not in_node_section:
line = file.readline()
if 'Nodes:' in line:
in_node_section = True
break
# In the nodes section now, generate a switch and host for each node
while in_node_section:
line = file.readline().strip()
if not line:
in_node_section = False
print 'Finished parsing nodes'
break
line_split = line.split('\t')
node_id = int(line_split[0])
print 'Generating switch and host for ID: ' + str(node_id)
switch = self.addSwitch('s' + str(node_id), inband = False)
host = self.addHost('h' + str(node_id), ip = '10.0.0.' + str(node_id + 1))
self.addLink(switch, host, bw=1000, use_htb=True) # TODO: Better define link parameters for hosts
self.routers.append(switch)
self.switch_names.append('s' + str(node_id))
self.hostnames.append('h' + str(node_id))
# Skip ahead to the edges section
in_edge_section = False
while not in_edge_section:
line = file.readline()
if 'Edges:' in line:
in_edge_section = True
break
# In the edges section now, add all required links
while in_edge_section:
line = file.readline().strip()
if not line: # Empty string
in_edge_section = False
print 'Finished parsing edges'
break
line_split = line.split('\t')
switch_id_1 = int(line_split[1])
switch_id_2 = int(line_split[2])
delay_ms = str(float(line_split[4])) + 'ms'
self.edges.append(('s' + str(switch_id_1), 's' + str(switch_id_2), float(line_split[4])))
self.edges.append(('s' + str(switch_id_2), 's' + str(switch_id_1), float(line_split[4])))
bandwidth_Mbps = float(line_split[5])
print 'Adding link between switch ' + str(switch_id_1) + ' and ' + str(switch_id_2) + '\n\tRate: ' \
+ str(bandwidth_Mbps) + ' Mbps\tDelay: ' + delay_ms
# params = {'bw':bandwidth_Mbps, 'delay':delay_ms}]
# TODO: Figure out why setting the delay won't work
self.addLink(self.routers[switch_id_1], self.routers[switch_id_2], bw=bandwidth_Mbps, delay=delay_ms, max_queue_size=1000, use_htb=True)
file.close()
def get_controller_placement(self, latency_metric = LATENCY_METRIC_MIN_AVERAGE_DELAY):
delay_metric_value = sys.float_info.max
source_node_id = None
for src_switch in self.routers:
# Compute the shortest path tree for each possible controller placement
nodes = set(self.routers)
graph = defaultdict(list)
for src,dst,cost in self.edges:
graph[src].append((cost, dst))
path_tree_map = defaultdict(lambda : None)
queue, seen = [(0,src_switch,())], set()
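            # Dijkstra's shortest-path search over (cost, node, parent_path) tuples kept
            # in a heap; once the loop finishes, path_tree_map[node][0] holds the total
            # delay from src_switch to node, which the placement metrics below aggregate.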
while queue:
(cost,node1,path) = heappop(queue)
if node1 not in seen:
seen.add(node1)
path = (cost, node1, path)
path_tree_map[node1] = path
for next_cost, node2 in graph.get(node1, ()):
if node2 not in seen:
heappush(queue, (cost + next_cost, node2, path))
# Calculate the metric value for this position
if latency_metric == LATENCY_METRIC_MIN_AVERAGE_DELAY:
sum_delay = 0
for receiver in path_tree_map:
sum_delay += path_tree_map[receiver][0]
avg_delay = sum_delay / float(len(path_tree_map))
if avg_delay < delay_metric_value:
source_node_id = src_switch
delay_metric_value = avg_delay
elif latency_metric == LATENCY_METRIC_MIN_MAXIMUM_DELAY:
max_delay = 0
for receiver in path_tree_map:
if path_tree_map[receiver][0] > max_delay:
max_delay = path_tree_map[receiver][0]
if max_delay < delay_metric_value:
source_node_id = src_switch
delay_metric_value = max_delay
print 'Found best controller placement at ' + str(source_node_id) + ' with metric: ' + str(delay_metric_value)
return source_node_id, delay_metric_value
def get_host_list(self):
return self.hostnames
def get_switch_list(self):
return self.switch_names
def mcastConfig(self, net):
for hostname in self.hostnames:
net.get(hostname).cmd('route add -net 224.0.0.0/4 ' + hostname + '-eth0')
def __str__(self):
return self.file_path
class ManhattanGridTopo(Topo):
def __init__(self, grid_x, grid_y, link_Mbps, link_delay_ms, edge_interconnect = False):
# Initialize topology
Topo.__init__( self )
self.hostnames = []
self.switch_names = []
self.routers = []
self.grid_routers = {} # Stores the same objects as self.routers, but keyed as a 2 dimensional map (self.grid_routers[x_coord][y_coord])
self.edges = []
print 'Generating Manhattan Grid Topology with Parameters:'
print 'Grid X: ' + str(grid_x) + ' Grid Y: ' + str(grid_y) + ' TotalNumSwitches: ' + str(grid_x * grid_y)
print 'Link Bandwidth: ' + str(link_Mbps) + ' Mbps \tLink Delay: ' + str(link_delay_ms) + ' ms'
print 'Edge Interconnect: ' + str(edge_interconnect)
# Generate an X * Y grid of routers
host_id = 1
for x in range(0, grid_x):
for y in range(0, grid_y):
switch = self.addSwitch('s' + str(x) + str(y), inband = False)
host = self.addHost('h' + str(host_id), ip = '10.0.0.' + str(host_id))
self.addLink(switch, host, bw=1000, use_htb=True) # TODO: Better define link parameters for hosts
self.routers.append(switch)
if x not in self.grid_routers:
self.grid_routers[x] = {}
self.grid_routers[x][y] = switch
self.switch_names.append('s' + str(x) + str(y))
self.hostnames.append('h' + str(host_id))
host_id += 1
# Add links between all adjacent nodes (not including diagonal adjacencies)
for x in range(0, grid_x - 1):
for y in range(0, grid_y):
# Add the X direction link
self.edges.append(('s' + str(x) + str(y), 's' + str(x + 1) + str(y), link_delay_ms))
self.edges.append(('s' + str(x + 1) + str(y), 's' + str(x) + str(y), link_delay_ms))
print 'Adding link between switch ' + 's' + str(x) + str(y) + ' and ' + 's' + str(x + 1) + str(y) + '\n\tRate: ' \
+ str(link_Mbps) + ' Mbps\tDelay: ' + str(link_delay_ms)
self.addLink(self.grid_routers[x][y], self.grid_routers[x + 1][y], bw=link_Mbps, delay=str(link_delay_ms) + 'ms', max_queue_size=1000, use_htb=True)
for y in range(0, grid_y - 1):
for x in range(0, grid_x):
# Add the Y direction link
self.edges.append(('s' + str(x) + str(y), 's' + str(x) + str(y + 1), link_delay_ms))
self.edges.append(('s' + str(x) + str(y + 1), 's' + str(x) + str(y), link_delay_ms))
print 'Adding link between switch ' + 's' + str(x) + str(y) + ' and ' + 's' + str(x) + str(y + 1) + '\n\tRate: ' \
+ str(link_Mbps) + ' Mbps\tDelay: ' + str(link_delay_ms)
self.addLink(self.grid_routers[x][y], self.grid_routers[x][y + 1], bw=link_Mbps, delay=str(link_delay_ms) + 'ms', max_queue_size=1000, use_htb=True)
# Interconnect the grid edges if the edge_interconnect flag is set
if edge_interconnect:
for x in range(0, grid_x):
self.edges.append(('s' + str(x) + str(grid_y - 1), 's' + str(x) + str(0), link_delay_ms))
self.edges.append(('s' + str(x) + str(0), 's' + str(x) + str(grid_y - 1), link_delay_ms))
print 'Adding link between switch ' + 's' + str(x) + str(0) + ' and ' + 's' + str(x) + str(grid_y - 1) + '\n\tRate: ' \
+ str(link_Mbps) + ' Mbps\tDelay: ' + str(link_delay_ms)
self.addLink(self.grid_routers[x][0], self.grid_routers[x][grid_y - 1], bw=link_Mbps, delay=str(link_delay_ms) + 'ms', max_queue_size=1000, use_htb=True)
for y in range(0, grid_y):
self.edges.append(('s' + str(0) + str(y), 's' + str(grid_x - 1) + str(y), link_delay_ms))
self.edges.append(('s' + str(grid_x - 1) + str(y), 's' + str(0) + str(y), link_delay_ms))
print 'Adding link between switch ' + 's' + str(0) + str(y) + ' and ' + 's' + str(grid_x - 1) + str(y) + '\n\tRate: ' \
+ str(link_Mbps) + ' Mbps\tDelay: ' + str(link_delay_ms)
self.addLink(self.grid_routers[0][y], self.grid_routers[grid_x - 1][y], bw=link_Mbps, delay=str(link_delay_ms) + 'ms', max_queue_size=1000, use_htb=True)
def get_controller_placement(self, latency_metric = LATENCY_METRIC_MIN_AVERAGE_DELAY):
delay_metric_value = sys.float_info.max
source_node_id = None
for src_switch in self.routers:
# Compute the shortest path tree for each possible controller placement
nodes = set(self.routers)
graph = defaultdict(list)
for src,dst,cost in self.edges:
graph[src].append((cost, dst))
path_tree_map = defaultdict(lambda : None)
queue, seen = [(0,src_switch,())], set()
while queue:
(cost,node1,path) = heappop(queue)
if node1 not in seen:
seen.add(node1)
path = (cost, node1, path)
path_tree_map[node1] = path
for next_cost, node2 in graph.get(node1, ()):
if node2 not in seen:
heappush(queue, (cost + next_cost, node2, path))
# Calculate the metric value for this position
if latency_metric == LATENCY_METRIC_MIN_AVERAGE_DELAY:
sum_delay = 0
for receiver in path_tree_map:
sum_delay += path_tree_map[receiver][0]
avg_delay = sum_delay / float(len(path_tree_map))
if avg_delay < delay_metric_value:
source_node_id = src_switch
delay_metric_value = avg_delay
elif latency_metric == LATENCY_METRIC_MIN_MAXIMUM_DELAY:
max_delay = 0
for receiver in path_tree_map:
if path_tree_map[receiver][0] > max_delay:
max_delay = path_tree_map[receiver][0]
if max_delay < delay_metric_value:
source_node_id = src_switch
delay_metric_value = max_delay
print 'Found best controller placement at ' + str(source_node_id) + ' with metric: ' + str(delay_metric_value)
return source_node_id, delay_metric_value
def get_host_list(self):
return self.hostnames
def get_switch_list(self):
return self.switch_names
def mcastConfig(self, net):
for hostname in self.hostnames:
net.get(hostname).cmd('route add -net 224.0.0.0/4 ' + hostname + '-eth0')
class MulticastTestTopo( Topo ):
"Simple multicast testing example"
def __init__( self ):
"Create custom topo."
# Initialize topology
Topo.__init__( self )
# Add hosts and switches
h0 = self.addHost('h0', ip='10.0.0.1')
h1 = self.addHost('h1', ip='10.0.0.2')
h2 = self.addHost('h2', ip='10.0.0.3')
h3 = self.addHost('h3', ip='10.0.0.4')
h4 = self.addHost('h4', ip='10.0.0.5')
h5 = self.addHost('h5', ip='10.0.0.6')
h6 = self.addHost('h6', ip='10.0.0.7')
h7 = self.addHost('h7', ip='10.0.0.8')
h8 = self.addHost('h8', ip='10.0.0.9')
h9 = self.addHost('h9', ip='10.0.0.10')
h10 = self.addHost('h10', ip='10.0.0.11')
s0 = self.addSwitch('s0')
s1 = self.addSwitch('s1')
s2 = self.addSwitch('s2')
s3 = self.addSwitch('s3')
s4 = self.addSwitch('s4')
s5 = self.addSwitch('s5')
s6 = self.addSwitch('s6')
# Add links
self.addLink(s0, s1, bw = 10, use_htb = True)
self.addLink(s0, s2, bw = 10, use_htb = True)
self.addLink(s1, s3, bw = 10, use_htb = True)
self.addLink(s3, s4, bw = 10, use_htb = True)
self.addLink(s1, s4, bw = 10, use_htb = True)
self.addLink(s1, s5, bw = 10, use_htb = True)
self.addLink(s5, s2, bw = 10, use_htb = True)
self.addLink(s2, s6, bw = 10, use_htb = True)
self.addLink(s6, s4, bw = 10, use_htb = True)
self.addLink(s0, h0, bw = 10, use_htb = True)
self.addLink(s2, h1, bw = 10, use_htb = True)
self.addLink(s2, h2, bw = 10, use_htb = True)
self.addLink(s4, h3, bw = 10, use_htb = True)
self.addLink(s4, h4, bw = 10, use_htb = True)
self.addLink(s4, h5, bw = 10, use_htb = True)
self.addLink(s1, h6, bw = 10, use_htb = True)
self.addLink(s5, h7, bw = 10, use_htb = True)
self.addLink(s6, h8, bw = 10, use_htb = True)
self.addLink(s3, h9, bw = 10, use_htb = True)
self.addLink(s1, h10, bw = 10, use_htb = True)
def mcastConfig(self, net):
# Configure hosts for multicast support
net.get('h0').cmd('route add -net 224.0.0.0/4 h0-eth0')
net.get('h1').cmd('route add -net 224.0.0.0/4 h1-eth0')
net.get('h2').cmd('route add -net 224.0.0.0/4 h2-eth0')
net.get('h3').cmd('route add -net 224.0.0.0/4 h3-eth0')
net.get('h4').cmd('route add -net 224.0.0.0/4 h4-eth0')
net.get('h5').cmd('route add -net 224.0.0.0/4 h5-eth0')
net.get('h6').cmd('route add -net 224.0.0.0/4 h6-eth0')
net.get('h7').cmd('route add -net 224.0.0.0/4 h7-eth0')
net.get('h8').cmd('route add -net 224.0.0.0/4 h8-eth0')
net.get('h9').cmd('route add -net 224.0.0.0/4 h9-eth0')
net.get('h10').cmd('route add -net 224.0.0.0/4 h10-eth0')
def get_host_list(self):
return ['h0', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'h7', 'h8', 'h9', 'h10']
def get_switch_list(self):
return ['s0', 's1', 's2', 's3', 's4', 's5', 's6']
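# Illustrative sketch (an assumption, not part of the original file): these Topo
# subclasses are normally handed to Mininet together with TCLink so that the
# bw/delay parameters above take effect, e.g.:
#
#   from mininet.net import Mininet
#   from mininet.link import TCLink
#   from mininet.node import RemoteController
#
#   topo = MulticastTestTopo()
#   net = Mininet(topo=topo, link=TCLink, controller=RemoteController)
#   net.start()
#   topo.mcastConfig(net)   # adds the 224.0.0.0/4 routes on every host
#   net.stop()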
|
|
# -*- coding: utf-8 -*-
"""
A simple countdown timer.
This is a very basic countdown timer. You can change the timer length as well
as pause, restart and reset it. Currently this is more of a demo of a
composite.
Each part of the timer (hours, minutes, seconds) can be changed independently
using mouse buttons 4 and 5 (scroll wheel).
Button 1 starts/pauses the countdown.
Button 2 resets timer.
Configuration parameters:
format: display format for this module (default 'Timer {timer}')
    sound: path to a sound file to play when the timer ends (default None)
time: number of seconds to start countdown with (default 60)
Format placeholders:
{timer} display hours:minutes:seconds
@author tobes
SAMPLE OUTPUT
{'full_text': 'Timer 0:01:00'}
running
[
{'full_text': 'Timer '},
{'color': '#00FF00', 'full_text': '0'},
{'full_text': ':'},
{'color': '#00FF00', 'full_text': '00'},
{'full_text': ':'},
{'color': '#00FF00', 'full_text': '54'},
]
paused
[
{'full_text': 'Timer '},
{'color': '#FFFF00', 'full_text': '0'},
{'full_text': ':'},
{'color': '#FFFF00', 'full_text': '00'},
{'full_text': ':'},
{'color': '#FFFF00', 'full_text': '54'},
]
"""
from time import time
from threading import Timer
class Py3status:
"""
"""
# available configuration parameters
format = 'Timer {timer}'
sound = None
time = 60
def post_config_hook(self):
self.running = False
self.end_time = None
self.time_left = None
self.color = None
self.alarm_timer = None
self.alarm = False
self.done = False
def _time_up(self):
"""
Called when the timer expires
"""
self.running = False
self.color = '#FF0000'
self.time_left = 0
self.done = True
if self.sound:
self.py3.play_sound(self.sound)
self.alarm = True
self.timer()
def timer(self):
        def make_2_digit(value):
value = str(value)
if len(value) == 1:
value = '0' + value
return value
if self.running or self.done:
t = int(self.end_time - time())
if t <= 0:
t = 0
else:
if self.time_left:
t = self.time_left
else:
t = self.time
# Hours
hours, t = divmod(t, 3600)
# Minutes
mins, t = divmod(t, 60)
# Seconds
seconds = t
if self.running:
cached_until = self.py3.time_in(0, offset=self.cache_offset)
else:
cached_until = self.py3.CACHE_FOREVER
composites = [
{
'full_text': str(hours),
'color': self.color,
'index': 'hours',
},
{
'color': '#CCCCCC',
'full_text': ':',
},
{
                'full_text': make_2_digit(mins),
'color': self.color,
'index': 'mins',
},
{
'color': '#CCCCCC',
'full_text': ':',
},
{
                'full_text': make_2_digit(seconds),
'color': self.color,
'index': 'seconds',
},
]
timer = self.py3.composite_create(composites)
return {
'cached_until': cached_until,
'full_text': self.py3.safe_format(self.format, {'timer': timer})
}
def on_click(self, event):
deltas = {
'hours': 3600,
'mins': 60,
'seconds': 1
}
index = event['index']
button = event['button']
        # If an alarm sound was played, cancel the sound on any button press
if self.alarm:
self.py3.stop_sound()
self.alarm = False
return
if button == 1:
if self.running:
# pause timer
self.running = False
self.time_left = int(self.end_time - time())
self.color = '#FFFF00'
if self.alarm_timer:
self.alarm_timer.cancel()
else:
# start/restart timer
self.running = True
if self.time_left:
self.end_time = time() + self.time_left
else:
self.end_time = time() + self.time
self.cache_offset = self.end_time % 1
self.color = '#00FF00'
if self.alarm_timer:
self.alarm_timer.cancel()
self.done = False
self.alarm_timer = Timer(self.time_left or self.time, self._time_up)
self.alarm_timer.start()
if button == 2:
self.running = False
self.time_left = None
self.color = None
self.done = False
if self.alarm_timer:
self.alarm_timer.cancel()
if not self.running:
self.done = False
# change timer section HH:MM:SS
if self.time_left:
t = self.time_left
else:
t = self.time
if button == 4:
t += deltas.get(index, 0)
if button == 5:
t -= deltas.get(index, 0)
if t < 0:
t = 0
if self.time_left:
self.time_left = t
else:
self.time = t
def kill(self):
# remove any timer
if self.alarm_timer:
self.alarm_timer.cancel()
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
|
# Copyright 2012, Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the share RPC API.
"""
from oslo_config import cfg
import oslo_messaging as messaging
from oslo_serialization import jsonutils
from manila import rpc
from manila.share import utils
CONF = cfg.CONF
class ShareAPI(object):
"""Client side of the share rpc API.
API version history:
1.0 - Initial version.
1.1 - Add manage_share() and unmanage_share() methods
1.2 - Add extend_share() method
1.3 - Add shrink_share() method
1.4 - Introduce Share Instances:
create_share() -> create_share_instance()
delete_share() -> delete_share_instance()
Add share_instance argument to allow_access() & deny_access()
1.5 - Add create_consistency_group, delete_consistency_group
create_cgsnapshot, and delete_cgsnapshot methods
1.6 - Introduce Share migration:
migrate_share()
get_migration_info()
get_driver_migration_info()
1.7 - Update target call API in allow/deny access methods
1.8 - Introduce Share Replication:
create_share_replica()
delete_share_replica()
promote_share_replica()
update_share_replica()
1.9 - Add manage_snapshot() and unmanage_snapshot() methods
1.10 - Add migration_complete(), migration_cancel() and
migration_get_progress(), rename migrate_share() to
migration_start(), rename get_migration_info() to
migration_get_info(), rename get_driver_migration_info() to
migration_get_driver_info()
1.11 - Add create_replicated_snapshot() and
delete_replicated_snapshot() methods
1.12 - Add provide_share_server(), create_share_server() and
migration_driver_recovery(), remove migration_get_driver_info(),
update migration_cancel(), migration_complete() and
migration_get_progress method signature, rename
migration_get_info() to connection_get_info()
"""
BASE_RPC_API_VERSION = '1.0'
def __init__(self, topic=None):
super(ShareAPI, self).__init__()
target = messaging.Target(topic=CONF.share_topic,
version=self.BASE_RPC_API_VERSION)
self.client = rpc.get_client(target, version_cap='1.12')
def create_share_instance(self, context, share_instance, host,
request_spec, filter_properties,
snapshot_id=None):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.4')
request_spec_p = jsonutils.to_primitive(request_spec)
call_context.cast(context,
'create_share_instance',
share_instance_id=share_instance['id'],
request_spec=request_spec_p,
filter_properties=filter_properties,
snapshot_id=snapshot_id)
def manage_share(self, context, share, driver_options=None):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host, version='1.1')
call_context.cast(context,
'manage_share',
share_id=share['id'],
driver_options=driver_options)
def unmanage_share(self, context, share):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host, version='1.1')
call_context.cast(context, 'unmanage_share', share_id=share['id'])
def manage_snapshot(self, context, snapshot, host,
driver_options=None):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.9')
call_context.cast(context,
'manage_snapshot',
snapshot_id=snapshot['id'],
driver_options=driver_options)
def unmanage_snapshot(self, context, snapshot, host):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.9')
call_context.cast(context,
'unmanage_snapshot',
snapshot_id=snapshot['id'])
def delete_share_instance(self, context, share_instance, force=False):
host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=host, version='1.4')
call_context.cast(context,
'delete_share_instance',
share_instance_id=share_instance['id'],
force=force)
def migration_start(self, context, share, dest_host,
force_host_assisted_migration, preserve_metadata,
writable, nondisruptive, new_share_network_id,
new_share_type_id):
new_host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(
context,
'migration_start',
share_id=share['id'],
dest_host=dest_host,
force_host_assisted_migration=force_host_assisted_migration,
preserve_metadata=preserve_metadata,
writable=writable,
nondisruptive=nondisruptive,
new_share_network_id=new_share_network_id,
new_share_type_id=new_share_type_id)
def connection_get_info(self, context, share_instance):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
return call_context.call(context,
'connection_get_info',
share_instance_id=share_instance['id'])
def delete_share_server(self, context, share_server):
host = utils.extract_host(share_server['host'])
call_context = self.client.prepare(server=host, version='1.0')
call_context.cast(context,
'delete_share_server',
share_server=share_server)
def create_snapshot(self, context, share, snapshot):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host)
call_context.cast(context,
'create_snapshot',
share_id=share['id'],
snapshot_id=snapshot['id'])
def delete_snapshot(self, context, snapshot, host, force=False):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host)
call_context.cast(context,
'delete_snapshot',
snapshot_id=snapshot['id'],
force=force)
def create_replicated_snapshot(self, context, share, replicated_snapshot):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host, version='1.11')
call_context.cast(context,
'create_replicated_snapshot',
snapshot_id=replicated_snapshot['id'],
share_id=share['id'])
def delete_replicated_snapshot(self, context, replicated_snapshot, host,
share_id=None, force=False):
host = utils.extract_host(host)
call_context = self.client.prepare(server=host, version='1.11')
call_context.cast(context,
'delete_replicated_snapshot',
snapshot_id=replicated_snapshot['id'],
share_id=share_id,
force=force)
@staticmethod
def _get_access_rules(access):
if isinstance(access, list):
return [rule['id'] for rule in access]
else:
return [access['id']]
def allow_access(self, context, share_instance, access):
host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=host, version='1.7')
call_context.cast(context,
'allow_access',
share_instance_id=share_instance['id'],
access_rules=self._get_access_rules(access))
def deny_access(self, context, share_instance, access):
host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=host, version='1.7')
call_context.cast(context,
'deny_access',
share_instance_id=share_instance['id'],
access_rules=self._get_access_rules(access))
def publish_service_capabilities(self, context):
call_context = self.client.prepare(fanout=True, version='1.0')
call_context.cast(context, 'publish_service_capabilities')
def extend_share(self, context, share, new_size, reservations):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host, version='1.2')
call_context.cast(context,
'extend_share',
share_id=share['id'],
new_size=new_size,
reservations=reservations)
def shrink_share(self, context, share, new_size):
host = utils.extract_host(share['instance']['host'])
call_context = self.client.prepare(server=host, version='1.3')
call_context.cast(context,
'shrink_share',
share_id=share['id'],
new_size=new_size)
def create_consistency_group(self, context, cg, host):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.5')
call_context.cast(context,
'create_consistency_group',
cg_id=cg['id'])
def delete_consistency_group(self, context, cg):
new_host = utils.extract_host(cg['host'])
call_context = self.client.prepare(server=new_host, version='1.5')
call_context.cast(context,
'delete_consistency_group',
cg_id=cg['id'])
def create_cgsnapshot(self, context, cgsnapshot, host):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.5')
call_context.cast(context,
'create_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
def delete_cgsnapshot(self, context, cgsnapshot, host):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.5')
call_context.cast(context,
'delete_cgsnapshot',
cgsnapshot_id=cgsnapshot['id'])
def create_share_replica(self, context, share_replica, host,
request_spec, filter_properties):
new_host = utils.extract_host(host)
call_context = self.client.prepare(server=new_host, version='1.8')
request_spec_p = jsonutils.to_primitive(request_spec)
call_context.cast(context,
'create_share_replica',
share_replica_id=share_replica['id'],
request_spec=request_spec_p,
filter_properties=filter_properties,
share_id=share_replica['share_id'])
def delete_share_replica(self, context, share_replica, force=False):
host = utils.extract_host(share_replica['host'])
call_context = self.client.prepare(server=host, version='1.8')
call_context.cast(context,
'delete_share_replica',
share_replica_id=share_replica['id'],
share_id=share_replica['share_id'],
force=force)
def promote_share_replica(self, context, share_replica):
host = utils.extract_host(share_replica['host'])
call_context = self.client.prepare(server=host, version='1.8')
call_context.cast(context,
'promote_share_replica',
share_replica_id=share_replica['id'],
share_id=share_replica['share_id'])
def update_share_replica(self, context, share_replica):
host = utils.extract_host(share_replica['host'])
call_context = self.client.prepare(server=host, version='1.8')
call_context.cast(context,
'update_share_replica',
share_replica_id=share_replica['id'],
share_id=share_replica['share_id'])
def migration_complete(self, context, src_share_instance,
dest_instance_id):
new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(context,
'migration_complete',
src_instance_id=src_share_instance['id'],
dest_instance_id=dest_instance_id)
def migration_cancel(self, context, src_share_instance, dest_instance_id):
new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(context,
'migration_cancel',
src_instance_id=src_share_instance['id'],
dest_instance_id=dest_instance_id)
def migration_get_progress(self, context, src_share_instance,
dest_instance_id):
new_host = utils.extract_host(src_share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
return call_context.call(context,
'migration_get_progress',
src_instance_id=src_share_instance['id'],
dest_instance_id=dest_instance_id)
def provide_share_server(self, context, share_instance, share_network_id,
snapshot_id=None):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
return call_context.call(context,
'provide_share_server',
share_instance_id=share_instance['id'],
share_network_id=share_network_id,
snapshot_id=snapshot_id)
def create_share_server(self, context, share_instance, share_server_id):
new_host = utils.extract_host(share_instance['host'])
call_context = self.client.prepare(server=new_host, version='1.12')
call_context.cast(context,
'create_share_server',
share_server_id=share_server_id)
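# Illustrative usage sketch (an assumption, not part of the original module):
# consumers construct the client once and issue casts/calls against it, e.g.:
#
#   share_rpcapi = ShareAPI()
#   share_rpcapi.extend_share(ctxt, share, new_size=10, reservations=reservations)
#   info = share_rpcapi.connection_get_info(ctxt, share_instance)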
|
|
# Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Class to represent an AWS Virtual Machine object.
All VM specifics are self-contained and the class provides methods to
operate on the VM: boot, shutdown, etc.
"""
import json
import logging
import threading
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import flags
from perfkitbenchmarker import package_managers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.aws import aws_disk
from perfkitbenchmarker.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_string('aws_user_name', 'ubuntu',
'This determines the user name that Perfkit will '
'attempt to use. This must be changed in order to '
'use any image other than ubuntu.')
HVM = 'HVM'
PV = 'PV'
NON_HVM_PREFIXES = ['m1', 'c1', 't1', 'm2']
US_EAST_1 = 'us-east-1'
US_WEST_1 = 'us-west-1'
US_WEST_2 = 'us-west-2'
EU_WEST_1 = 'eu-west-1'
AP_NORTHEAST_1 = 'ap-northeast-1'
AP_SOUTHEAST_1 = 'ap-southeast-1'
AP_SOUTHEAST_2 = 'ap-southeast-2'
SA_EAST_1 = 'sa-east-1'
AMIS = {
HVM: {
US_EAST_1: 'ami-acff23c4',
US_WEST_1: 'ami-05717d40',
US_WEST_2: 'ami-fbce8bcb',
EU_WEST_1: 'ami-30b46b47',
AP_NORTHEAST_1: 'ami-d186dcd0',
AP_SOUTHEAST_1: 'ami-9afca7c8',
AP_SOUTHEAST_2: 'ami-956706af',
SA_EAST_1: 'ami-9970d884',
},
PV: {
US_EAST_1: 'ami-d2ff23ba',
US_WEST_1: 'ami-73717d36',
US_WEST_2: 'ami-f1ce8bc1',
EU_WEST_1: 'ami-4ab46b3d',
AP_NORTHEAST_1: 'ami-c786dcc6',
AP_SOUTHEAST_1: 'ami-eefca7bc',
AP_SOUTHEAST_2: 'ami-996706a3',
SA_EAST_1: 'ami-6770d87a',
}
}
PLACEMENT_GROUP_PREFIXES = frozenset(
['c3', 'c4', 'cc2', 'cg1', 'g2', 'cr1', 'r3', 'hi1', 'i2'])
NUM_LOCAL_VOLUMES = {
'c1.medium': 1, 'c1.xlarge': 4,
'c3.large': 2, 'c3.xlarge': 2, 'c3.2xlarge': 2, 'c3.4xlarge': 2,
'c3.8xlarge': 2, 'cc2.8xlarge': 4,
'cg1.4xlarge': 2, 'cr1.8xlarge': 2, 'g2.2xlarge': 1,
'hi1.4xlarge': 2, 'hs1.8xlarge': 24,
'i2.xlarge': 1, 'i2.2xlarge': 2, 'i2.4xlarge': 4, 'i2.8xlarge': 8,
'm1.small': 1, 'm1.medium': 1, 'm1.large': 2, 'm1.xlarge': 4,
'm2.xlarge': 1, 'm2.2xlarge': 1, 'm2.4xlarge': 2,
'm3.medium': 1, 'm3.large': 1, 'm3.xlarge': 2, 'm3.2xlarge': 2,
'r3.large': 1, 'r3.xlarge': 1, 'r3.2xlarge': 1, 'r3.4xlarge': 1,
'r3.8xlarge': 2,
}
DRIVE_START_LETTER = 'b'
INSTANCE_EXISTS_STATUSES = frozenset(
['pending', 'running', 'stopping', 'stopped'])
INSTANCE_DELETED_STATUSES = frozenset(['shutting-down', 'terminated'])
INSTANCE_KNOWN_STATUSES = INSTANCE_EXISTS_STATUSES | INSTANCE_DELETED_STATUSES
def GetBlockDeviceMap(machine_type):
"""Returns the block device map to expose all devices for a given machine.
Args:
machine_type: The machine type to create a block device map for.
Returns:
    The JSON representation of the block device map, in a form compatible
    with the AWS CLI, or None if the machine type has no local drives.
"""
if machine_type in NUM_LOCAL_VOLUMES:
mappings = [{'VirtualName': 'ephemeral%s' % i,
'DeviceName': '/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)}
for i in xrange(NUM_LOCAL_VOLUMES[machine_type])]
return json.dumps(mappings)
else:
return None
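# For example, GetBlockDeviceMap('c3.large') covers that machine type's two local
# volumes and (with DRIVE_START_LETTER = 'b') serializes to the JSON equivalent of:
#   [{'VirtualName': 'ephemeral0', 'DeviceName': '/dev/xvdb'},
#    {'VirtualName': 'ephemeral1', 'DeviceName': '/dev/xvdc'}]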
def GetImage(machine_type, region):
"""Gets an ami compatible with the machine type and zone."""
prefix = machine_type.split('.')[0]
if prefix in NON_HVM_PREFIXES:
return AMIS[PV][region]
else:
return AMIS[HVM][region]
def IsPlacementGroupCompatible(machine_type):
"""Returns True if VMs of 'machine_type' can be put in a placement group."""
prefix = machine_type.split('.')[0]
return prefix in PLACEMENT_GROUP_PREFIXES
class AwsVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing an AWS Virtual Machine."""
_lock = threading.Lock()
imported_keyfile_set = set()
deleted_keyfile_set = set()
def __init__(self, vm_spec):
"""Initialize a AWS virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(AwsVirtualMachine, self).__init__(vm_spec)
self.region = self.zone[:-1]
self.image = self.image or GetImage(self.machine_type, self.region)
self.user_name = FLAGS.aws_user_name
if self.machine_type in NUM_LOCAL_VOLUMES:
self.max_local_drives = NUM_LOCAL_VOLUMES[self.machine_type]
self.local_drive_counter = 0
def ImportKeyfile(self):
"""Imports the public keyfile to AWS."""
with self._lock:
if self.region in self.imported_keyfile_set:
return
cat_cmd = ['cat',
vm_util.GetPublicKeyPath()]
keyfile, _ = vm_util.IssueRetryableCommand(cat_cmd)
import_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'import-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri,
'--public-key-material=%s' % keyfile]
vm_util.IssueRetryableCommand(import_cmd)
self.imported_keyfile_set.add(self.region)
if self.region in self.deleted_keyfile_set:
self.deleted_keyfile_set.remove(self.region)
def DeleteKeyfile(self):
"""Deletes the imported keyfile for a region."""
with self._lock:
if self.region in self.deleted_keyfile_set:
return
delete_cmd = util.AWS_PREFIX + [
'ec2', '--region=%s' % self.region,
'delete-key-pair',
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
vm_util.IssueRetryableCommand(delete_cmd)
self.deleted_keyfile_set.add(self.region)
if self.region in self.imported_keyfile_set:
self.imported_keyfile_set.remove(self.region)
@vm_util.Retry()
def _PostCreate(self):
"""Get the instance's data and tag it."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
logging.info('Getting instance %s public IP. This will fail until '
'a public IP is available, but will be retried.', self.id)
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
instance = response['Reservations'][0]['Instances'][0]
self.ip_address = instance['PublicIpAddress']
self.internal_ip = instance['PrivateIpAddress']
self.group_id = instance['SecurityGroups'][0]['GroupId']
util.AddDefaultTags(self.id, self.region)
def _CreateDependencies(self):
"""Create VM dependencies."""
self.ImportKeyfile()
def _DeleteDependencies(self):
"""Delete VM dependencies."""
self.DeleteKeyfile()
def _Create(self):
"""Create a VM instance."""
super(AwsVirtualMachine, self)._Create()
placement = 'AvailabilityZone=%s' % self.zone
if IsPlacementGroupCompatible(self.machine_type):
placement += ',GroupName=%s' % self.network.placement_group.name
block_device_map = GetBlockDeviceMap(self.machine_type)
create_cmd = util.AWS_PREFIX + [
'ec2',
'run-instances',
'--region=%s' % self.region,
'--subnet-id=%s' % self.network.subnet.id,
'--associate-public-ip-address',
'--image-id=%s' % self.image,
'--instance-type=%s' % self.machine_type,
'--placement=%s' % placement,
'--key-name=%s' % 'perfkit-key-%s' % FLAGS.run_uri]
if block_device_map:
create_cmd.append('--block-device-mappings=%s' % block_device_map)
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.id = response['Instances'][0]['InstanceId']
def _Delete(self):
"""Delete a VM instance."""
delete_cmd = util.AWS_PREFIX + [
'ec2',
'terminate-instances',
'--region=%s' % self.region,
'--instance-ids=%s' % self.id]
vm_util.IssueCommand(delete_cmd)
def _Exists(self):
"""Returns true if the VM exists."""
describe_cmd = util.AWS_PREFIX + [
'ec2',
'describe-instances',
'--region=%s' % self.region,
'--filter=Name=instance-id,Values=%s' % self.id]
stdout, _ = vm_util.IssueRetryableCommand(describe_cmd)
response = json.loads(stdout)
reservations = response['Reservations']
assert len(reservations) < 2, 'Too many reservations.'
if not reservations:
return False
instances = reservations[0]['Instances']
assert len(instances) == 1, 'Wrong number of instances.'
status = instances[0]['State']['Name']
assert status in INSTANCE_KNOWN_STATUSES, status
return status in INSTANCE_EXISTS_STATUSES
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
volume = aws_disk.AwsDisk(disk_spec, self.zone)
self.scratch_disks.append(volume)
if volume.disk_type == disk.LOCAL:
if self.local_drive_counter >= self.max_local_drives:
raise errors.Error('Not enough local drives.')
volume.device_letter = chr(ord(DRIVE_START_LETTER) +
self.local_drive_counter)
self.local_drive_counter += 1
else:
volume.Create()
util.AddDefaultTags(volume.id, self.region)
volume.Attach(self)
self.FormatDisk(volume.GetDevicePath())
self.MountDisk(volume.GetDevicePath(), disk_spec.mount_point)
def GetLocalDrives(self):
"""Returns a list of local drives on the VM.
Returns:
      A list of strings, where each string is the absolute path to a local
      drive on the VM (e.g. '/dev/sdb').
"""
return ['/dev/xvd%s' % chr(ord(DRIVE_START_LETTER) + i)
for i in xrange(NUM_LOCAL_VOLUMES[self.machine_type])]
def SetupLocalDrives(self):
"""Performs AWS specific setup of local drives."""
# Some images may automount one local drive, but we don't
# want to fail if this wasn't the case.
self.RemoteCommand('sudo umount /mnt', ignore_failure=True)
def AddMetadata(self, **kwargs):
"""Adds metadata to the VM."""
util.AddTags(self.id, self.region, **kwargs)
class DebianBasedAwsVirtualMachine(AwsVirtualMachine,
package_managers.AptMixin):
pass
class RhelBasedAwsVirtualMachine(AwsVirtualMachine,
package_managers.YumMixin):
pass
|
|
# Copyright (C) 2010 Chris Jerdonek ([email protected])
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Supports the parsing of command-line options for check_blink_style.py."""
import logging
from optparse import OptionParser
import sys
from blinkpy.style.filter import validate_filter_rules
# This module should not import anything from checker.py.
_log = logging.getLogger(__name__)
_USAGE = """usage: %prog [--help] [options] [path1] [path2] ...
Overview:
Check coding style according to WebKit style guidelines:
http://webkit.org/coding/coding-style.html
Path arguments can be files and directories. If neither a git commit nor
paths are passed, then all changes in your source control working directory
are checked.
Style errors:
This script assigns to every style error a confidence score from 1-5 and
a category name. A confidence score of 5 means the error is certainly
a problem, and 1 means it could be fine.
Category names appear in error messages in brackets, for example
[whitespace/indent]. See the options section below for an option that
displays all available categories and which are reported by default.
Filters:
Use filters to configure what errors to report. Filters are specified using
a comma-separated list of boolean filter rules. The script reports errors
in a category if the category passes the filter, as described below.
All categories start out passing. Boolean filter rules are then evaluated
from left to right, with later rules taking precedence. For example, the
rule "+foo" passes any category that starts with "foo", and "-foo" fails
any such category. The filter input "-whitespace,+whitespace/braces" fails
the category "whitespace/tab" and passes "whitespace/braces".
Examples: --filter=-whitespace,+whitespace/braces
--filter=-whitespace,-runtime/printf,+runtime/printf_format
--filter=-,+build/include_what_you_use
Paths:
Certain style-checking behavior depends on the paths relative to
the WebKit source root of the files being checked. For example,
certain types of errors may be handled differently for files in
WebKit/gtk/webkit/ (e.g. by suppressing "readability/naming" errors
for files in this directory).
Consequently, if the path relative to the source root cannot be
determined for a file being checked, then style checking may not
work correctly for that file. This can occur, for example, if no
WebKit checkout can be found, or if the source root can be detected,
but one of the files being checked lies outside the source tree.
If a WebKit checkout can be detected and all files being checked
are in the source tree, then all paths will automatically be
converted to paths relative to the source root prior to checking.
This is also useful for display purposes.
Currently, this command can detect the source root only if the
command is run from within a WebKit checkout (i.e. if the current
working directory is below the root of a checkout). In particular,
it is not recommended to run this script from a directory outside
a checkout.
Running this script from a top-level WebKit source directory and
checking only files in the source tree will ensure that all style
checking behaves correctly -- whether or not a checkout can be
detected. This is because all file paths will already be relative
to the source root and so will not need to be converted."""
_EPILOG = ('This script can miss errors and does not substitute for '
'code review.')
# This class should not have knowledge of the flag key names.
class DefaultCommandOptionValues(object):
"""Stores the default check_blink_style.py command-line options.
Attributes:
output_format: A string that is the default output format.
min_confidence: An integer that is the default minimum confidence level.
"""
def __init__(self, min_confidence, output_format):
self.min_confidence = min_confidence
self.output_format = output_format
# This class should not have knowledge of the flag key names.
class CommandOptionValues(object):
"""Stores the option values passed by the user via the command line.
Attributes:
is_verbose: A boolean value of whether verbose logging is enabled.
filter_rules: The list of filter rules provided by the user.
These rules are appended to the base rules and
path-specific rules and so take precedence over
the base filter rules, etc.
git_commit: A string representing the git commit to check.
The default is None.
min_confidence: An integer between 1 and 5 inclusive that is the
minimum confidence level of style errors to report.
The default is 1, which reports all errors.
output_format: A string that is the output format. The supported
output formats are "emacs" which emacs can parse
and "vs7" which Microsoft Visual Studio 7 can parse.
"""
def __init__(self,
filter_rules=None,
git_commit=None,
diff_files=None,
is_verbose=False,
min_confidence=1,
output_format='emacs'):
if filter_rules is None:
filter_rules = []
if (min_confidence < 1) or (min_confidence > 5):
raise ValueError('Invalid "min_confidence" parameter: value '
'must be an integer between 1 and 5 inclusive. '
'Value given: "%s".' % min_confidence)
if output_format not in ('emacs', 'vs7'):
raise ValueError('Invalid "output_format" parameter: '
'value must be "emacs" or "vs7". '
'Value given: "%s".' % output_format)
self.filter_rules = filter_rules
self.git_commit = git_commit
self.diff_files = diff_files
self.is_verbose = is_verbose
self.min_confidence = min_confidence
self.output_format = output_format
# Useful for unit testing.
def __eq__(self, other):
"""Return whether this instance is equal to another."""
if self.filter_rules != other.filter_rules:
return False
if self.git_commit != other.git_commit:
return False
if self.diff_files != other.diff_files:
return False
if self.is_verbose != other.is_verbose:
return False
if self.min_confidence != other.min_confidence:
return False
if self.output_format != other.output_format:
return False
return True
# Useful for unit testing.
def __ne__(self, other):
# Python does not automatically deduce this from __eq__().
return not self.__eq__(other)
class ArgumentPrinter(object):
"""Supports the printing of check_blink_style.py command arguments."""
def _flag_pair_to_string(self, flag_key, flag_value):
return '--%(key)s=%(val)s' % {'key': flag_key, 'val': flag_value}
def to_flag_string(self, options):
"""Return a flag string of the given CommandOptionValues instance.
This method orders the flag values alphabetically by the flag key.
Args:
options: A CommandOptionValues instance.
"""
flags = {}
flags['min-confidence'] = options.min_confidence
flags['output'] = options.output_format
# Only include the filter flag if user-provided rules are present.
filter_rules = options.filter_rules
if filter_rules:
flags['filter'] = ','.join(filter_rules)
if options.git_commit:
flags['git-commit'] = options.git_commit
if options.diff_files:
flags['diff_files'] = options.diff_files
flag_string = ''
# Alphabetizing lets us unit test this method.
for key in sorted(flags.keys()):
flag_string += self._flag_pair_to_string(key, flags[key]) + ' '
return flag_string.strip()
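    # For example, a CommandOptionValues instance with filter_rules=['-whitespace'],
    # min_confidence=3 and output_format='vs7' serializes (alphabetically by flag
    # key) to: '--filter=-whitespace --min-confidence=3 --output=vs7'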
class ArgumentParser(object):
# FIXME: Move the documentation of the attributes to the __init__
# docstring after making the attributes internal.
"""Supports the parsing of check_blink_style.py command arguments.
Attributes:
create_usage: A function that accepts a DefaultCommandOptionValues
instance and returns a string of usage instructions.
Defaults to the function that generates the usage
string for check_blink_style.py.
default_options: A DefaultCommandOptionValues instance that provides
the default values for options not explicitly
provided by the user.
stderr_write: A function that takes a string as a parameter and
serves as stderr.write. Defaults to sys.stderr.write.
This parameter should be specified only for unit tests.
"""
def __init__(self,
all_categories,
default_options,
base_filter_rules=None,
mock_stderr=None,
usage=None):
"""Create an ArgumentParser instance.
Args:
all_categories: The set of all available style categories.
default_options: See the corresponding attribute in the class
docstring.
Keyword Args:
base_filter_rules: The list of filter rules at the beginning of
the list of rules used to check style. This
list has the least precedence when checking
style and precedes any user-provided rules.
The class uses this parameter only for display
purposes to the user. Defaults to the empty list.
create_usage: See the documentation of the corresponding
attribute in the class docstring.
stderr_write: See the documentation of the corresponding
attribute in the class docstring.
"""
if base_filter_rules is None:
base_filter_rules = []
stderr = sys.stderr if mock_stderr is None else mock_stderr
if usage is None:
usage = _USAGE
self._all_categories = all_categories
self._base_filter_rules = base_filter_rules
# FIXME: Rename these to reflect that they are internal.
self.default_options = default_options
self.stderr_write = stderr.write
self._parser = self._create_option_parser(
stderr=stderr,
usage=usage,
default_min_confidence=self.default_options.min_confidence,
default_output_format=self.default_options.output_format)
def _create_option_parser(self, stderr, usage, default_min_confidence,
default_output_format):
# Since the epilog string is short, it is not necessary to replace
# the epilog string with a mock epilog string when testing.
# For this reason, we use _EPILOG directly rather than passing it
# as an argument like we do for the usage string.
parser = OptionParser(usage=usage, epilog=_EPILOG)
filter_help = (
'set a filter to control what categories of style '
'errors to report. Specify a filter using a comma-'
'delimited list of boolean filter rules, for example '
'"--filter -whitespace,+whitespace/braces". To display '
'all categories and which are enabled by default, pass '
"""no value (e.g. '-f ""' or '--filter=').""")
parser.add_option(
'-f',
'--filter-rules',
metavar='RULES',
dest='filter_value',
help=filter_help)
git_commit_help = (
'check all changes in the given commit. '
"Use 'commit_id..' to check all changes after commit_id")
parser.add_option(
'-g',
'--git-diff',
'--git-commit',
metavar='COMMIT',
dest='git_commit',
help=git_commit_help,
)
diff_files_help = 'diff the files passed on the command line rather than checking the style of every line'
parser.add_option(
'--diff-files',
action='store_true',
dest='diff_files',
default=False,
help=diff_files_help)
min_confidence_help = ('set the minimum confidence of style errors '
'to report. Can be an integer 1-5, with 1 '
'displaying all errors. Defaults to %default.')
parser.add_option(
'-m',
'--min-confidence',
metavar='INT',
type='int',
dest='min_confidence',
default=default_min_confidence,
help=min_confidence_help)
output_format_help = ('set the output format, which can be "emacs" '
'or "vs7" (for Visual Studio). '
'Defaults to "%default".')
parser.add_option(
'-o',
'--output-format',
metavar='FORMAT',
choices=['emacs', 'vs7'],
dest='output_format',
default=default_output_format,
help=output_format_help)
verbose_help = 'enable verbose logging.'
parser.add_option(
'-v',
'--verbose',
dest='is_verbose',
default=False,
action='store_true',
help=verbose_help)
# Override OptionParser's error() method so that option help will
# also display when an error occurs. Normally, just the usage
# string displays and not option help.
parser.error = self._parse_error
# Override OptionParser's print_help() method so that help output
# does not render to the screen while running unit tests.
print_help = parser.print_help
parser.print_help = lambda file=stderr: print_help(file=file)
return parser
def _parse_error(self, error_message):
"""Print the help string and an error message, and exit."""
# The method format_help() includes both the usage string and
# the flag options.
        help_text = self._parser.format_help()
        # Separate help from the error message with a single blank line.
        self.stderr_write(help_text + '\n')
if error_message:
_log.error(error_message)
# Since we are using this method to replace/override the Python
# module optparse's OptionParser.error() method, we match its
# behavior and exit with status code 2.
#
# As additional background, Python documentation says--
#
# "Unix programs generally use 2 for command line syntax errors
# and 1 for all other kind of errors."
#
# (from http://docs.python.org/library/sys.html#sys.exit )
sys.exit(2)
def _exit_with_categories(self):
"""Exit and print the style categories and default filter rules."""
self.stderr_write('\nAll categories:\n')
for category in sorted(self._all_categories):
self.stderr_write(' ' + category + '\n')
self.stderr_write('\nDefault filter rules**:\n')
for filter_rule in sorted(self._base_filter_rules):
self.stderr_write(' ' + filter_rule + '\n')
        self.stderr_write('\n**The command always evaluates the above rules '
                          'before applying any --filter flag.\n\n')
sys.exit(0)
def _parse_filter_flag(self, flag_value):
"""Parse the --filter flag, and return a list of filter rules.
Args:
flag_value: A string of comma-separated filter rules, for
example "-whitespace,+whitespace/indent".
"""
filters = []
for uncleaned_filter in flag_value.split(','):
filter = uncleaned_filter.strip()
if not filter:
continue
filters.append(filter)
return filters
def parse(self, args):
"""Parse the command line arguments to check_blink_style.py.
Args:
args: A list of command-line arguments as returned by sys.argv[1:].
Returns:
A tuple of (paths, options)
paths: The list of paths to check.
options: A CommandOptionValues instance.
"""
(options, paths) = self._parser.parse_args(args=args)
filter_value = options.filter_value
git_commit = options.git_commit
diff_files = options.diff_files
is_verbose = options.is_verbose
min_confidence = options.min_confidence
output_format = options.output_format
if filter_value is not None and not filter_value:
# Then the user explicitly passed no filter, for
# example "-f ''" or "--filter=".
self._exit_with_categories()
# Validate user-provided values.
min_confidence = int(min_confidence)
if (min_confidence < 1) or (min_confidence > 5):
self._parse_error(
'option --min-confidence: invalid integer: '
'%s: value must be between 1 and 5' % min_confidence)
if filter_value:
filter_rules = self._parse_filter_flag(filter_value)
else:
filter_rules = []
try:
validate_filter_rules(filter_rules, self._all_categories)
except ValueError as err:
self._parse_error(err)
options = CommandOptionValues(
filter_rules=filter_rules,
git_commit=git_commit,
diff_files=diff_files,
is_verbose=is_verbose,
min_confidence=min_confidence,
output_format=output_format)
return (paths, options)
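# Illustrative parsing sketch (hypothetical names): _StubDefaults stands in for
# the real default-options object, which is only read here for its
# min_confidence and output_format attributes.
class _StubDefaults(object):
    min_confidence = 1
    output_format = 'emacs'
def _example_parse(argv):
    parser = ArgumentParser(all_categories=set(['whitespace']),
                            default_options=_StubDefaults())
    # For argv = ['--min-confidence=3', 'foo.cc'] this returns (['foo.cc'], options)
    # with options.min_confidence == 3 and options.output_format == 'emacs'.
    return parser.parse(argv)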
|
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
from typing import Awaitable, Callable, Dict, Optional, Sequence, Union
import pkg_resources
import google.auth # type: ignore
import google.api_core
from google.api_core import exceptions as core_exceptions
from google.api_core import gapic_v1
from google.api_core import retry as retries
from google.api_core import operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.cloud.aiplatform_v1.types import pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_job as gca_pipeline_job
from google.cloud.aiplatform_v1.types import pipeline_service
from google.cloud.aiplatform_v1.types import training_pipeline
from google.cloud.aiplatform_v1.types import training_pipeline as gca_training_pipeline
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class PipelineServiceTransport(abc.ABC):
"""Abstract transport class for PipelineService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/cloud-platform",)
DEFAULT_HOST: str = "aiplatform.googleapis.com"
def __init__(
self,
*,
host: str = DEFAULT_HOST,
credentials: ga_credentials.Credentials = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
**kwargs,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is mutually exclusive with credentials.
scopes (Optional[Sequence[str]]): A list of scopes.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
# Save the scopes.
self._scopes = scopes
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials and credentials_file:
raise core_exceptions.DuplicateCredentialArgs(
"'credentials_file' and 'credentials' are mutually exclusive"
)
if credentials_file is not None:
credentials, _ = google.auth.load_credentials_from_file(
credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
)
elif credentials is None:
credentials, _ = google.auth.default(
**scopes_kwargs, quota_project_id=quota_project_id
)
# If the credentials are service account credentials, then always try to use self signed JWT.
if (
always_use_jwt_access
and isinstance(credentials, service_account.Credentials)
and hasattr(service_account.Credentials, "with_always_use_jwt_access")
):
credentials = credentials.with_always_use_jwt_access(True)
# Save the credentials.
self._credentials = credentials
def _prep_wrapped_messages(self, client_info):
# Precompute the wrapped methods.
self._wrapped_methods = {
self.create_training_pipeline: gapic_v1.method.wrap_method(
self.create_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.get_training_pipeline: gapic_v1.method.wrap_method(
self.get_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.list_training_pipelines: gapic_v1.method.wrap_method(
self.list_training_pipelines,
default_timeout=None,
client_info=client_info,
),
self.delete_training_pipeline: gapic_v1.method.wrap_method(
self.delete_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.cancel_training_pipeline: gapic_v1.method.wrap_method(
self.cancel_training_pipeline,
default_timeout=None,
client_info=client_info,
),
self.create_pipeline_job: gapic_v1.method.wrap_method(
self.create_pipeline_job, default_timeout=None, client_info=client_info,
),
self.get_pipeline_job: gapic_v1.method.wrap_method(
self.get_pipeline_job, default_timeout=None, client_info=client_info,
),
self.list_pipeline_jobs: gapic_v1.method.wrap_method(
self.list_pipeline_jobs, default_timeout=None, client_info=client_info,
),
self.delete_pipeline_job: gapic_v1.method.wrap_method(
self.delete_pipeline_job, default_timeout=None, client_info=client_info,
),
self.cancel_pipeline_job: gapic_v1.method.wrap_method(
self.cancel_pipeline_job, default_timeout=None, client_info=client_info,
),
}
def close(self):
"""Closes resources associated with the transport.
.. warning::
Only call this method if the transport is NOT shared
with other clients - this may cause errors in other clients!
"""
raise NotImplementedError()
@property
def operations_client(self):
"""Return the client designed to process long-running operations."""
raise NotImplementedError()
@property
def create_training_pipeline(
self,
) -> Callable[
[pipeline_service.CreateTrainingPipelineRequest],
Union[
gca_training_pipeline.TrainingPipeline,
Awaitable[gca_training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def get_training_pipeline(
self,
) -> Callable[
[pipeline_service.GetTrainingPipelineRequest],
Union[
training_pipeline.TrainingPipeline,
Awaitable[training_pipeline.TrainingPipeline],
],
]:
raise NotImplementedError()
@property
def list_training_pipelines(
self,
) -> Callable[
[pipeline_service.ListTrainingPipelinesRequest],
Union[
pipeline_service.ListTrainingPipelinesResponse,
Awaitable[pipeline_service.ListTrainingPipelinesResponse],
],
]:
raise NotImplementedError()
@property
def delete_training_pipeline(
self,
) -> Callable[
[pipeline_service.DeleteTrainingPipelineRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_training_pipeline(
self,
) -> Callable[
[pipeline_service.CancelTrainingPipelineRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
@property
def create_pipeline_job(
self,
) -> Callable[
[pipeline_service.CreatePipelineJobRequest],
Union[gca_pipeline_job.PipelineJob, Awaitable[gca_pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def get_pipeline_job(
self,
) -> Callable[
[pipeline_service.GetPipelineJobRequest],
Union[pipeline_job.PipelineJob, Awaitable[pipeline_job.PipelineJob]],
]:
raise NotImplementedError()
@property
def list_pipeline_jobs(
self,
) -> Callable[
[pipeline_service.ListPipelineJobsRequest],
Union[
pipeline_service.ListPipelineJobsResponse,
Awaitable[pipeline_service.ListPipelineJobsResponse],
],
]:
raise NotImplementedError()
@property
def delete_pipeline_job(
self,
) -> Callable[
[pipeline_service.DeletePipelineJobRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedError()
@property
def cancel_pipeline_job(
self,
) -> Callable[
[pipeline_service.CancelPipelineJobRequest],
Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]],
]:
raise NotImplementedError()
__all__ = ("PipelineServiceTransport",)
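# Illustrative sketch (not part of the generated client): a concrete gRPC or REST
# subclass is normally constructed by the client; this only shows that passing
# credentials explicitly makes the base __init__ skip the Application Default
# Credentials lookup. AnonymousCredentials is used purely as a stand-in.
def _example_transport(transport_cls=PipelineServiceTransport):
    return transport_cls(
        host="aiplatform.googleapis.com",
        credentials=ga_credentials.AnonymousCredentials(),
        always_use_jwt_access=False,
    )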
|
|
import cPickle
import time
from smqtk.representation.descriptor_index import DescriptorIndex
# Try to import required module
try:
import solr
except ImportError:
solr = None
__author__ = "[email protected]"
class SolrDescriptorIndex (DescriptorIndex):
"""
Descriptor index that uses a Solr instance as a backend storage medium.
Fields where components are stored within a document are specified at
    construction time. We additionally set the ``id`` field to a string UUID.
``id`` is set because it is a common, required field for unique
identification of documents.
Descriptor UUIDs should maintain their uniqueness when converted to a
string, otherwise this backend will not work well when querying.
"""
@classmethod
def is_usable(cls):
return solr is not None
def __init__(self, solr_conn_addr, index_uuid,
index_uuid_field, d_uid_field, descriptor_field,
timestamp_field, solr_params=None,
commit_on_add=True, max_boolean_clauses=1024,
pickle_protocol=-1):
"""
Construct a descriptor index pointing to a Solr instance.
:param solr_conn_addr: HTTP(S) address for the Solr index to use
:type solr_conn_addr: str
:param index_uuid: Unique ID for the descriptor index to use within the
configured Solr index.
:type index_uuid: str
:param index_uuid_field: Solr index field to store/locate index UUID
value.
:type index_uuid_field: str
:param d_uid_field: Solr index field to store/locate descriptor UUID
values
:type d_uid_field: str
:param descriptor_field: Solr index field to store the code-associated
descriptor object.
:type descriptor_field: str
:param timestamp_field: Solr index field to store floating-point UNIX
timestamps.
:type timestamp_field: str
:param solr_params: Dictionary of additional keyword parameters to set
            in the ``solr.Solr`` instance used. See the ``solrpy``
documentation for available parameters and values.
:type solr_params: dict[str, object]
:param commit_on_add: Immediately commit changes when one or many
            descriptors are added.
:type commit_on_add: bool
:param max_boolean_clauses: Solr instance's configured
maxBooleanClauses configuration property (found in solrconfig.xml
file). This is needed so we can correctly chunk up batch queries
without breaking the server. This may also be less than the Solr
instance's set value.
:type max_boolean_clauses: int
:param pickle_protocol: Pickling protocol to use. We will use -1 by
default (latest version, probably binary).
:type pickle_protocol: int
"""
super(SolrDescriptorIndex, self).__init__()
self.index_uuid = index_uuid
self.index_uuid_field = index_uuid_field
self.d_uid_field = d_uid_field
self.descriptor_field = descriptor_field
self.timestamp_field = timestamp_field
self.commit_on_add = commit_on_add
self.max_boolean_clauses = int(max_boolean_clauses)
assert self.max_boolean_clauses >= 2, "Need more clauses"
self.pickle_protocol = pickle_protocol
        # solr_params defaults to None; fall back to an empty dict so the
        # keyword expansion below does not fail.
        self.solr_params = solr_params if solr_params is not None else {}
        self.solr = solr.Solr(solr_conn_addr, **self.solr_params)
def __getstate__(self):
return self.get_config()
def __setstate__(self, state):
state['solr'] = solr.Solr(state["solr_conn_addr"],
**state['solr_params'])
del state['solr_conn_addr']
self.__dict__.update(state)
def _doc_for_code_descr(self, d):
"""
Generate standard identifying document base for the given
descriptor element.
"""
uuid = d.uuid()
return {
            'id': '-'.join([self.index_uuid, str(uuid)]),
self.index_uuid_field: self.index_uuid,
self.d_uid_field: uuid,
}
def get_config(self):
return {
"solr_conn_addr": self.solr.url,
"index_uuid": self.index_uuid,
"index_uuid_field": self.index_uuid_field,
"d_uid_field": self.d_uid_field,
"descriptor_field": self.descriptor_field,
"timestamp_field": self.timestamp_field,
"solr_params": self.solr_params,
"commit_on_add": self.commit_on_add,
"max_boolean_clauses": self.max_boolean_clauses,
"pickle_protocol": self.pickle_protocol,
}
def count(self):
"""
:return: Number of descriptor elements stored in this index.
:rtype: int
"""
        return int(
            self.solr.select("%s:%s AND %s:*"
                             % (self.index_uuid_field, self.index_uuid,
                                self.descriptor_field)).numFound
        )
def clear(self):
"""
Clear this descriptor index's entries.
"""
self.solr.delete_query("%s:%s"
% (self.index_uuid_field, self.index_uuid))
self.solr.commit()
def has_descriptor(self, uuid):
"""
Check if a DescriptorElement with the given UUID exists in this index.
:param uuid: UUID to query for
:type uuid: collections.Hashable
:return: True if a DescriptorElement with the given UUID exists in this
index, or False if not.
:rtype: bool
"""
# Try to select the descriptor
# TODO: Probably a better way of doing this that's more efficient.
return bool(
self.solr.select("%s:%s AND %s:%s"
% (self.index_uuid_field, self.index_uuid,
self.d_uid_field, uuid)).numFound
)
def add_descriptor(self, descriptor):
"""
Add a descriptor to this index.
Adding the same descriptor multiple times should not add multiple copies
of the descriptor in the index (based on UUID). Added descriptors
overwrite indexed descriptors based on UUID.
:param descriptor: Descriptor to index.
:type descriptor: smqtk.representation.DescriptorElement
"""
doc = self._doc_for_code_descr(descriptor)
doc[self.descriptor_field] = cPickle.dumps(descriptor,
self.pickle_protocol)
doc[self.timestamp_field] = time.time()
self.solr.add(doc, commit=self.commit_on_add)
def add_many_descriptors(self, descriptors):
"""
Add multiple descriptors at one time.
Adding the same descriptor multiple times should not add multiple copies
of the descriptor in the index (based on UUID). Added descriptors
overwrite indexed descriptors based on UUID.
:param descriptors: Iterable of descriptor instances to add to this
index.
:type descriptors:
collections.Iterable[smqtk.representation.DescriptorElement]
"""
documents = []
for d in descriptors:
doc = self._doc_for_code_descr(d)
doc[self.descriptor_field] = cPickle.dumps(d, self.pickle_protocol)
doc[self.timestamp_field] = time.time()
documents.append(doc)
self.solr.add_many(documents)
if self.commit_on_add:
self.solr.commit()
def get_descriptor(self, uuid):
"""
Get the descriptor in this index that is associated with the given UUID.
:param uuid: UUID of the DescriptorElement to get.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
:return: DescriptorElement associated with the queried UUID.
:rtype: smqtk.representation.DescriptorElement
"""
return tuple(self.get_many_descriptors(uuid))[0]
def get_many_descriptors(self, *uuids):
"""
Get an iterator over descriptors associated to given descriptor UUIDs.
:param uuids: Iterable of descriptor UUIDs to query for.
:type uuids: collections.Iterable[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
:return: Iterator of descriptors associated 1-to-1 to given uuid values.
:rtype: __generator[smqtk.representation.DescriptorElement]
"""
# Chunk up query based on max clauses available to us
def batch_query(batch):
"""
:type batch: list[collections.Hashable]
"""
query = ' OR '.join([self.d_uid_field + (':%s' % uid)
for uid in batch])
r = self.solr.select("%s:%s AND (%s)"
% (self.index_uuid_field, self.index_uuid,
query))
# result batches come in chunks of 10
for doc in r.results:
yield cPickle.loads(doc[self.descriptor_field])
for j in xrange(r.numFound // 10):
r = r.next_batch()
for doc in r.results:
yield cPickle.loads(doc[self.descriptor_field])
batch = []
for uid in uuids:
batch.append(uid)
# Will end up using max_clauses-1 OR statements, and one AND
if len(batch) == self.max_boolean_clauses:
for d in batch_query(batch):
yield d
batch = []
# tail batch
if batch:
assert len(batch) < self.max_boolean_clauses
for d in batch_query(batch):
yield d
def remove_descriptor(self, uuid):
"""
Remove a descriptor from this index by the given UUID.
:param uuid: UUID of the DescriptorElement to remove.
:type uuid: collections.Hashable
:raises KeyError: The given UUID doesn't associate to a
DescriptorElement in this index.
"""
self.remove_many_descriptors(uuid)
def remove_many_descriptors(self, uuids):
"""
Remove descriptors associated to given descriptor UUIDs from this index.
:param uuids: Iterable of descriptor UUIDs to remove.
:type uuids: tuple[collections.Hashable]
:raises KeyError: A given UUID doesn't associate with a
DescriptorElement in this index.
"""
# Chunk up operation based on max clauses available to us
def batch_op(batch):
"""
:type batch: list[collections.Hashable]
"""
uuid_query = ' OR '.join([self.d_uid_field + (':%s' % str(uid))
for uid in batch])
self.solr.delete("%s:%s AND (%s)"
% (self.index_uuid_field, self.index_uuid,
uuid_query))
batch = []
for uid in uuids:
batch.append(uid)
# Will end up using max_clauses-1 OR statements, and one AND
if len(batch) == self.max_boolean_clauses:
batch_op(batch)
batch = []
# tail batch
if batch:
batch_op(batch)
def iterkeys(self):
"""
Return an iterator over indexed descriptor keys, which are their UUIDs.
"""
r = self.solr.select('%s:%s %s:*'
% (self.index_uuid_field, self.index_uuid,
self.d_uid_field))
for doc in r.results:
yield doc[self.d_uid_field]
for _ in xrange(r.numFound // 10):
r = r.next_batch()
for doc in r.results:
yield doc[self.d_uid_field]
def iterdescriptors(self):
"""
Return an iterator over indexed descriptor element instances.
"""
r = self.solr.select('%s:%s %s:*'
% (self.index_uuid_field, self.index_uuid,
self.descriptor_field))
for doc in r.results:
yield cPickle.loads(doc[self.descriptor_field])
for _ in xrange(r.numFound // 10):
r = r.next_batch()
for doc in r.results:
yield cPickle.loads(doc[self.descriptor_field])
def iteritems(self):
"""
Return an iterator over indexed descriptor key and instance pairs.
"""
r = self.solr.select('%s:%s %s:* %s:*'
% (self.index_uuid_field, self.index_uuid,
self.d_uid_field, self.descriptor_field))
for doc in r.results:
d = cPickle.loads(doc[self.descriptor_field])
yield d.uuid(), d
for _ in xrange(r.numFound // 10):
r = r.next_batch()
for doc in r.results:
d = cPickle.loads(doc[self.descriptor_field])
yield d.uuid(), d
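# Illustrative sketch (not part of SMQTK): constructing an index against a local
# Solr core, and the generic batching pattern that get_many_descriptors and
# remove_many_descriptors use to stay under maxBooleanClauses. The URL and field
# names are assumptions for demonstration; the optional ``solr`` package must be
# installed for the constructor to succeed.
def _example_index():
    return SolrDescriptorIndex(
        'http://localhost:8983/solr/descriptors',
        index_uuid='demo-index',
        index_uuid_field='index_uuid_s',
        d_uid_field='descriptor_uid_s',
        descriptor_field='descriptor_b',
        timestamp_field='timestamp_f',
    )
def _chunked(iterable, chunk_size):
    """
    Yield lists of at most ``chunk_size`` items from ``iterable``.
    """
    batch = []
    for item in iterable:
        batch.append(item)
        if len(batch) == chunk_size:
            yield batch
            batch = []
    if batch:
        yield batch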
|
|
## A script for extracting info about the patients used in the analysis
## Load necessary modules
from rpy2 import robjects as ro
import numpy as np
import os
ro.r('library(survival)')
import re
##This path derivation only works when the script is run as a file (so __file__ is defined).
##If you are running interactively, set BASE_DIR to your paths manually.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
## There are three clinical follow-up files with non-redundant data. V4.0 is generally the most up to date, but the
## other files can contain more recent entries for some patients. As a result, the clinical data will be merged.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v4.0_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
## A patient can be listed multiple times in the file. The most recent listing (furthest down in the file) contains
## the most recent follow up data. This code checks whether the patient has already been loaded into the list and,
## if so, keeps the more recent data. The list is initialized with an empty placeholder entry, which is removed afterwards.
## The same parse-and-merge pattern is repeated for all three follow_up files; a consolidated sketch appears after the merges below.
## Data is: [[Patient ID, time(days), Vital status],[Patient ID, time(days), Vital status],...]
clinical1=[['','','']]
for i in data:
if clinical1[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical1[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical1[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical1.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical1.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
## Removing the empty value.
clinical1=clinical1[1:]
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v2.1_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical2=[['','','']]
for i in data:
if clinical2[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical2[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical2[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical2.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical2.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##removing the empty value
clinical2=clinical2[1:]
##merging the data
new_clinical=[]
for i in clinical2:
if i[0] not in [j[0] for j in clinical1]:
new_clinical.append(i)
else:
if i[1]<=clinical1[[j[0] for j in clinical1].index(i[0])][1]:
new_clinical.append(clinical1[[j[0] for j in clinical1].index(i[0])])
else:
new_clinical.append(i)
for i in clinical1:
if i[0] not in [j[0] for j in new_clinical]:
new_clinical.append(i)
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_follow_up_v1.5_brca.txt'))
##get the column indexes needed
columns=f.readline().split('\t')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical3=[['','','']]
for i in data:
if clinical3[-1][0]==i[patient_column]:
if re.search('^[0-9]+$',i[alive_column]):
clinical3[-1]=[i[patient_column],int(i[alive_column]),'Alive']
elif re.search('^[0-9]+$',i[death_column]):
clinical3[-1]=[i[patient_column],int(i[death_column]),'Dead']
else:
pass
else:
if re.search('^[0-9]+$',i[alive_column]):
clinical3.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical3.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
##removing the empty value
clinical3=clinical3[1:]
##merging the data
newer_clinical=[]
for i in clinical3:
if i[0] not in [j[0] for j in new_clinical]:
newer_clinical.append(i)
else:
if i[1]<=new_clinical[[j[0] for j in new_clinical].index(i[0])][1]:
newer_clinical.append(new_clinical[[j[0] for j in new_clinical].index(i[0])])
else:
newer_clinical.append(i)
for i in new_clinical:
if i[0] not in [j[0] for j in newer_clinical]:
newer_clinical.append(i)
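## Illustrative sketch (not used above): the parse-and-merge pattern repeated for the three follow_up files,
## consolidated into two hypothetical helpers. parse_follow_up(path) mirrors each per-file block, and
## merge_follow_up(primary,secondary) keeps whichever entry has the longer follow up time when a patient
## appears in both lists.
def parse_follow_up(path):
    handle=open(path)
    cols=handle.readline().split('\t')
    patient_col=cols.index('bcr_patient_barcode')
    alive_col=cols.index('last_contact_days_to')
    death_col=cols.index('death_days_to')
    handle.readline()
    handle.readline()
    records=[['','','']]
    for row in (line.split('\t') for line in handle):
        if re.search('^[0-9]+$',row[alive_col]):
            entry=[row[patient_col],int(row[alive_col]),'Alive']
        elif re.search('^[0-9]+$',row[death_col]):
            entry=[row[patient_col],int(row[death_col]),'Dead']
        else:
            continue
        if records[-1][0]==row[patient_col]:
            records[-1]=entry
        else:
            records.append(entry)
    handle.close()
    return records[1:]
def merge_follow_up(primary,secondary):
    merged=[]
    primary_ids=[entry[0] for entry in primary]
    for entry in secondary:
        if entry[0] not in primary_ids:
            merged.append(entry)
        elif entry[1]<=primary[primary_ids.index(entry[0])][1]:
            merged.append(primary[primary_ids.index(entry[0])])
        else:
            merged.append(entry)
    for entry in primary:
        if entry[0] not in [m[0] for m in merged]:
            merged.append(entry)
    return merged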
## Grade (encoded from histological type), sex, and age information were taken from the "clinical_patient" file. Dictionaries map the grade and sex strings to integers.
more_clinical={}
grade_dict={}
grade_dict['Infiltrating Ductal Carcinoma']=1
grade_dict['Metaplastic Carcinoma']=3
grade_dict['Mucinous Carcinoma']=4
grade_dict['Medullary Carcinoma']=5
grade_dict['Infiltrating Lobular Carcinoma']=6
sex_dict={}
sex_dict['MALE']=0
sex_dict['FEMALE']=1
## The "clinical_patient" file can also contain patients not listed in the follow_up files.
## In these cases the clinical data for these patients gets appended to a new clinical list.
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','clinical','nationwidechildrens.org_clinical_patient_brca.txt'))
columns=f.readline().split('\t')
grade_column=columns.index('histological_type')
sex_column=columns.index('gender')
age_column=columns.index('age_at_diagnosis')
patient_column=columns.index('bcr_patient_barcode')
alive_column=columns.index('last_contact_days_to')
death_column=columns.index('death_days_to')
f.readline()
f.readline()
data=[i.split('\t') for i in f]
clinical4=[]
for i in data:
try:
more_clinical[i[patient_column]]=[grade_dict[i[grade_column]],sex_dict[i[sex_column]],int(i[age_column])]
if re.search('^[0-9]+$',i[alive_column]):
clinical4.append([i[patient_column],int(i[alive_column]),'Alive'])
elif re.search('^[0-9]+$',i[death_column]):
clinical4.append([i[patient_column],int(i[death_column]),'Dead'])
else:
pass
    except (KeyError, ValueError, IndexError):
        ## Skip patients with missing or unmapped grade, sex, or age information.
        pass
newest_clinical=[]
##The clinical data in the clinical_patient file may be more up to date than the follow_up files,
##so all of the clinical data is merged, keeping the most up to date entry for each patient.
for i in clinical4:
if i[0] not in [j[0] for j in newer_clinical]:
newest_clinical.append(i)
else:
if i[1]<=newer_clinical[[j[0] for j in newer_clinical].index(i[0])][1]:
newest_clinical.append(newer_clinical[[j[0] for j in newer_clinical].index(i[0])])
else:
newest_clinical.append(i)
##also do the reverse, since newer_clinical can contain patients not included in clinical4
for i in newer_clinical:
if i[0] not in [j[0] for j in newest_clinical]:
newest_clinical.append(i)
## only patients who had a follow up time greater than 0 days are included in the analysis
clinical=[i for i in newest_clinical if i[1]>0]
final_clinical=[]
## A new list containing both follow up times and grade, sex, and age is constructed.
## Only patients with grade, sex, and age information are included.
## Data is [[Patient ID, time (days), vital status, grade, sex, age at diagnosis],...]
for i in clinical:
if i[0] in more_clinical:
final_clinical.append(i+more_clinical[i[0]])
## Need to map the mRNA files to the correct patients
## The necessary information is included in the FILE_SAMPLE_MAP_mrna.txt file
f=open(os.path.join(BASE_DIR,'tcga_data','BRCA','FILE_SAMPLE_MAP_mrna.txt'))
f.readline()
data=[i.strip().split() for i in f if i!='\n']
## 01 indicates a primary tumor, and only primary tumors are included in this analysis
TCGA_to_mrna={}
for i in data:
## The normalized data files are used
if 'genes.normalized_results' in i[0]:
if i[1].split('-')[3][:-1]=='01':
            ## Reconstruct the patient barcode (TCGA-XX-XXXX) from the sample barcode
            x='-'.join(i[1].split('-')[:3])
TCGA_to_mrna[x]=TCGA_to_mrna.get(x,[])+[i[0]]
clinical_and_files=[]
## I only care about patients that have complete clinical information
for i in final_clinical:
    if i[0] in TCGA_to_mrna:
## The mRNA files are added to the clinical list
## Data structure: [[Patient ID, time (days), vital status, grade, sex, age at diagnosis,[mRNA files]],...]
clinical_and_files.append(i+[TCGA_to_mrna[i[0]]])
else:
pass
##average age at diagnosis
age=np.mean([i[5] for i in clinical_and_files])
##number of males
males=len([i for i in clinical_and_files if i[4]==0])
##number of females
females=len([i for i in clinical_and_files if i[4]==1])
##to get the median survival we need to call survfit from r
##prepare variables for R
ro.globalenv['times']=ro.IntVector([i[1] for i in clinical_and_files])
##need to create a dummy variable group
ro.globalenv['group']=ro.IntVector([0 for i in clinical_and_files])
##need a vector for deaths
death_dic={}
death_dic['Alive']=0
death_dic['Dead']=1
ro.globalenv['died']=ro.IntVector([death_dic[i[2]] for i in clinical_and_files])
res=ro.r('survfit(Surv(times,died) ~ as.factor(group))')
#the number of events (deaths) is the fourth column of the printed survfit output
deaths=str(res).split('\n')[-2].strip().split()[3]
#the median survival time is the fifth column of the printed survfit output
median=str(res).split('\n')[-2].strip().split()[4]
##write data to a file
f=open('patient_info.txt','w')
f.write('\t'.join(['Average Age','Males','Females','Deaths','Median Survival'])+'\n')
f.write('\t'.join([str(age),str(males),str(females),deaths,median]))
f.close()
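## Illustrative sketch (not part of the analysis): reading patient_info.txt back into a dictionary keyed by column name.
def read_patient_info(path='patient_info.txt'):
    handle=open(path)
    header=handle.readline().rstrip('\n').split('\t')
    values=handle.readline().rstrip('\n').split('\t')
    handle.close()
    return dict(zip(header,values))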
|
|
"""The tests for the manual_mqtt Alarm Control Panel component."""
from datetime import timedelta
import unittest
from unittest.mock import patch
from homeassistant.setup import setup_component
from homeassistant.const import (
STATE_ALARM_DISARMED, STATE_ALARM_ARMED_HOME, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_NIGHT, STATE_ALARM_PENDING, STATE_ALARM_TRIGGERED)
from homeassistant.components import alarm_control_panel
import homeassistant.util.dt as dt_util
from tests.common import (
fire_time_changed, get_test_home_assistant,
mock_mqtt_component, fire_mqtt_message, assert_setup_component)
CODE = 'HELLO_CODE'
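# Illustrative helper sketch (not used by the tests below): the manual_mqtt
# configuration block the test cases repeat, expressed once with per-test
# overrides. The tests keep their explicit dicts for readability.
def _demo_panel_config(**overrides):
    config = {
        'platform': 'manual_mqtt',
        'name': 'test',
        'command_topic': 'alarm/command',
        'state_topic': 'alarm/state',
        'disarm_after_trigger': False,
    }
    config.update(overrides)
    return {alarm_control_panel.DOMAIN: config}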
class TestAlarmControlPanelManualMqtt(unittest.TestCase):
"""Test the manual_mqtt alarm module."""
def setUp(self): # pylint: disable=invalid-name
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.mock_publish = mock_mqtt_component(self.hass)
    def tearDown(self): # pylint: disable=invalid-name
        """Stop everything that was started."""
self.hass.stop()
def test_fail_setup_without_state_topic(self):
"""Test for failing with no state topic."""
with assert_setup_component(0) as config:
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt_alarm',
'command_topic': 'alarm/command'
}
})
assert not config[alarm_control_panel.DOMAIN]
def test_fail_setup_without_command_topic(self):
"""Test failing with no command topic."""
with assert_setup_component(0):
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'mqtt_alarm',
'state_topic': 'alarm/state'
}
})
def test_arm_home_no_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_home_with_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_HOME
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
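    # Illustrative helper sketch (not used by these tests): the repeated
    # "advance time and fire a time-changed event" pattern, factored out.
    # The tests keep the explicit with-blocks for clarity.
    def _example_advance_time(self, seconds):
        future = dt_util.utcnow() + timedelta(seconds=seconds)
        with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
                    'dt_util.utcnow'), return_value=future):
            fire_time_changed(self.hass, future)
            self.hass.block_till_done()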
def test_arm_home_with_invalid_code(self):
"""Attempt to arm home without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_arm_away_no_pending(self):
        """Test arm away method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_home_with_template_code(self):
"""Attempt to arm with a template-based code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code_template': '{{ "abc" }}',
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.hass.start()
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, 'abc')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_ARMED_HOME, state.state)
    def test_arm_away_with_pending(self):
        """Test arm away method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_ARMED_AWAY
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_away_with_invalid_code(self):
"""Attempt to arm away without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_night_no_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_pending(self):
"""Test arm night method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == \
STATE_ALARM_ARMED_NIGHT
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
# Do not go to the pending state when updating to the same state
alarm_control_panel.alarm_arm_night(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_arm_night_with_invalid_code(self):
"""Attempt to arm night without a valid code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_night(self.hass, CODE + '2')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_no_pending(self):
"""Test triggering when no pending submitted method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 1,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=60)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_delay(self):
"""Test trigger method and switch from pending to triggered."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'delay_time': 1,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_TRIGGERED,
state.attributes['post_pending_state'])
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_TRIGGERED, state.state)
def test_trigger_zero_trigger_time(self):
"""Test disabled trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 0,
'trigger_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_zero_trigger_time_with_pending(self):
"""Test disabled trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 2,
'trigger_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_trigger_with_pending(self):
        """Test trigger method with pending time."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 2,
'trigger_time': 3,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
state = self.hass.states.get(entity_id)
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_trigger_with_disarm_after_trigger(self):
"""Test disarm after trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': True,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_trigger_with_zero_specific_trigger_time(self):
        """Test trigger disabled by a state-specific zero trigger time."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 5,
'disarmed': {
'trigger_time': 0
},
'pending_time': 0,
'disarm_after_trigger': True,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_trigger_with_unused_zero_specific_trigger_time(self):
        """Test trigger with an unused state-specific zero trigger time."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 5,
'armed_home': {
'trigger_time': 0
},
'pending_time': 0,
'disarm_after_trigger': True,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_trigger_with_specific_trigger_time(self):
        """Test trigger with a state-specific trigger time."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'disarmed': {
'trigger_time': 5
},
'pending_time': 0,
'disarm_after_trigger': True,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_back_to_back_trigger_with_no_disarm_after_trigger(self):
"""Test no disarm after back to back trigger."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 5,
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE, entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
    def test_disarm_while_pending_trigger(self):
        """Test disarming while in the pending state."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'trigger_time': 5,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
    def test_disarm_during_trigger_with_invalid_code(self):
        """Test that disarming fails with an invalid code while pending."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 5,
'code': CODE + '2',
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_disarm(self.hass, entity_id=entity_id)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
def test_trigger_with_unused_specific_delay(self):
"""Test trigger method and switch from pending to triggered."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'delay_time': 5,
'pending_time': 0,
'armed_home': {
'delay_time': 10
},
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_TRIGGERED,
state.attributes['post_pending_state'])
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
def test_trigger_with_specific_delay(self):
"""Test trigger method and switch from pending to triggered."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'delay_time': 10,
'pending_time': 0,
'armed_away': {
'delay_time': 1
},
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_TRIGGERED,
state.attributes['post_pending_state'])
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
def test_trigger_with_pending_and_delay(self):
"""Test trigger method and switch from pending to triggered."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'delay_time': 1,
'pending_time': 0,
'triggered': {
'pending_time': 1
},
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
def test_trigger_with_pending_and_specific_delay(self):
"""Test trigger method and switch from pending to triggered."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'delay_time': 10,
'pending_time': 0,
'armed_away': {
'delay_time': 1
},
'triggered': {
'pending_time': 1
},
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state'
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_PENDING
assert state.attributes['post_pending_state'] == STATE_ALARM_TRIGGERED
future += timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
assert state.state == STATE_ALARM_TRIGGERED
def test_armed_home_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 10,
'armed_home': {
'pending_time': 2
},
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_home(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_armed_away_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 10,
'armed_away': {
'pending_time': 2
},
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_away(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_armed_night_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 10,
'armed_night': {
'pending_time': 2
},
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_arm_night(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_trigger_with_specific_pending(self):
"""Test arm home method."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 10,
'triggered': {
'pending_time': 2
},
'trigger_time': 3,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=2)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_TRIGGERED,
self.hass.states.get(entity_id).state)
future = dt_util.utcnow() + timedelta(seconds=5)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_arm_away_after_disabled_disarmed(self):
"""Test pending state with and without zero trigger time."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code': CODE,
'pending_time': 0,
'delay_time': 1,
'armed_away': {
'pending_time': 1,
},
'disarmed': {
'trigger_time': 0
},
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_away(self.hass, CODE)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_DISARMED,
state.attributes['pre_pending_state'])
self.assertEqual(STATE_ALARM_ARMED_AWAY,
state.attributes['post_pending_state'])
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_DISARMED,
state.attributes['pre_pending_state'])
self.assertEqual(STATE_ALARM_ARMED_AWAY,
state.attributes['post_pending_state'])
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_ARMED_AWAY, state.state)
alarm_control_panel.alarm_trigger(self.hass, entity_id=entity_id)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_PENDING, state.state)
self.assertEqual(STATE_ALARM_ARMED_AWAY,
state.attributes['pre_pending_state'])
self.assertEqual(STATE_ALARM_TRIGGERED,
state.attributes['post_pending_state'])
future += timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_TRIGGERED, state.state)
def test_disarm_with_template_code(self):
"""Attempt to disarm with a valid or invalid template-based code."""
self.assertTrue(setup_component(
self.hass, alarm_control_panel.DOMAIN,
{'alarm_control_panel': {
'platform': 'manual_mqtt',
'name': 'test',
'code_template':
'{{ "" if from_state == "disarmed" else "abc" }}',
'pending_time': 0,
'disarm_after_trigger': False,
'command_topic': 'alarm/command',
'state_topic': 'alarm/state',
}}))
entity_id = 'alarm_control_panel.test'
self.hass.start()
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_arm_home(self.hass, 'def')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_ARMED_HOME, state.state)
alarm_control_panel.alarm_disarm(self.hass, 'def')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_ARMED_HOME, state.state)
alarm_control_panel.alarm_disarm(self.hass, 'abc')
self.hass.block_till_done()
state = self.hass.states.get(entity_id)
self.assertEqual(STATE_ALARM_DISARMED, state.state)
def test_arm_home_via_command_topic(self):
"""Test arming home via command topic."""
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 1,
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'payload_arm_home': 'ARM_HOME',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
# Fire the arm command via MQTT; ensure state changes to pending
fire_mqtt_message(self.hass, 'alarm/command', 'ARM_HOME')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_HOME,
self.hass.states.get(entity_id).state)
def test_arm_away_via_command_topic(self):
"""Test arming away via command topic."""
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 1,
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'payload_arm_away': 'ARM_AWAY',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
# Fire the arm command via MQTT; ensure state changes to pending
fire_mqtt_message(self.hass, 'alarm/command', 'ARM_AWAY')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_AWAY,
self.hass.states.get(entity_id).state)
def test_arm_night_via_command_topic(self):
"""Test arming night via command topic."""
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 1,
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'payload_arm_night': 'ARM_NIGHT',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
# Fire the arm command via MQTT; ensure state changes to pending
fire_mqtt_message(self.hass, 'alarm/command', 'ARM_NIGHT')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_ARMED_NIGHT,
self.hass.states.get(entity_id).state)
def test_disarm_pending_via_command_topic(self):
"""Test disarming pending alarm via command topic."""
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 1,
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
'payload_disarm': 'DISARM',
}
})
entity_id = 'alarm_control_panel.test'
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
alarm_control_panel.alarm_trigger(self.hass)
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_PENDING,
self.hass.states.get(entity_id).state)
# Now that we're pending, receive a command to disarm
fire_mqtt_message(self.hass, 'alarm/command', 'DISARM')
self.hass.block_till_done()
self.assertEqual(STATE_ALARM_DISARMED,
self.hass.states.get(entity_id).state)
def test_state_changes_are_published_to_mqtt(self):
"""Test publishing of MQTT messages when state changes."""
assert setup_component(self.hass, alarm_control_panel.DOMAIN, {
alarm_control_panel.DOMAIN: {
'platform': 'manual_mqtt',
'name': 'test',
'pending_time': 1,
'trigger_time': 1,
'state_topic': 'alarm/state',
'command_topic': 'alarm/command',
}
})
# Component should send disarmed alarm state on startup
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_DISARMED, 0, True)
self.mock_publish.async_publish.reset_mock()
# Arm in home mode
alarm_control_panel.alarm_arm_home(self.hass)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_PENDING, 0, True)
self.mock_publish.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_ARMED_HOME, 0, True)
self.mock_publish.async_publish.reset_mock()
# Arm in away mode
alarm_control_panel.alarm_arm_away(self.hass)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_PENDING, 0, True)
self.mock_publish.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_ARMED_AWAY, 0, True)
self.mock_publish.async_publish.reset_mock()
# Arm in night mode
alarm_control_panel.alarm_arm_night(self.hass)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_PENDING, 0, True)
self.mock_publish.async_publish.reset_mock()
# Fast-forward a little bit
future = dt_util.utcnow() + timedelta(seconds=1)
with patch(('homeassistant.components.alarm_control_panel.manual_mqtt.'
'dt_util.utcnow'), return_value=future):
fire_time_changed(self.hass, future)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_ARMED_NIGHT, 0, True)
self.mock_publish.async_publish.reset_mock()
# Disarm
alarm_control_panel.alarm_disarm(self.hass)
self.hass.block_till_done()
self.mock_publish.async_publish.assert_called_once_with(
'alarm/state', STATE_ALARM_DISARMED, 0, True)
|
|
# coding: utf-8
# pylint: disable=no-member
"""Online evaluation metric module."""
from __future__ import absolute_import
import math
import numpy
from . import ndarray
def check_label_shapes(labels, preds, shape=0):
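    """Raise a ValueError if the shapes of labels and predictions differ.

    Parameters
    ----------
    labels : list of `NDArray` or `NDArray`
        The labels of the data.
    preds : list of `NDArray` or `NDArray`
        Predicted values.
    shape : int, optional
        If 0 (default), compare ``len(labels)`` and ``len(preds)``;
        otherwise compare ``labels.shape`` and ``preds.shape``.
    """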
if shape == 0:
label_shape, pred_shape = len(labels), len(preds)
else:
label_shape, pred_shape = labels.shape, preds.shape
if label_shape != pred_shape:
raise ValueError("Shape of labels {} does not match shape of "
"predictions {}".format(label_shape, pred_shape))
class EvalMetric(object):
"""Base class for all evaluation metrics.
.. note::
This is a base class that provides common metric interfaces.
One should not use this class directly, but instead create new metric
classes that extend it.
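    Examples
    --------
    A minimal sketch of a custom metric subclass (names are illustrative)::

        class ZeroOneError(EvalMetric):
            def __init__(self):
                super(ZeroOneError, self).__init__('zero_one_error')
            def update(self, labels, preds):
                for label, pred in zip(labels, preds):
                    pred_label = ndarray.argmax_channel(pred).asnumpy().astype('int32')
                    label = label.asnumpy().astype('int32')
                    self.sum_metric += (pred_label.flat != label.flat).sum()
                    self.num_inst += len(pred_label.flat)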
"""
def __init__(self, name, num=None):
self.name = name
self.num = num
self.reset()
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
raise NotImplementedError()
def reset(self):
"""Resets the internal evaluation result to initial state."""
if self.num is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
def get(self):
"""Gets the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
if self.num is None:
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
else:
names = ['%s_%d'%(self.name, i) for i in range(self.num)]
values = [x / y if y != 0 else float('nan') \
for x, y in zip(self.sum_metric, self.num_inst)]
return (names, values)
def get_name_value(self):
"""Returns zipped name and value pairs.
Returns
-------
list of tuples
A (name, value) tuple list.
"""
name, value = self.get()
if not isinstance(name, list):
name = [name]
if not isinstance(value, list):
value = [value]
return zip(name, value)
def __str__(self):
return "EvalMetric: {}".format(dict(self.get_name_value()))
class CompositeEvalMetric(EvalMetric):
"""Manages multiple evaluation metrics.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> eval_metrics_1 = mx.metric.Accuracy()
>>> eval_metrics_2 = mx.metric.F1()
>>> eval_metrics = mx.metric.CompositeEvalMetric()
>>> for child_metric in [eval_metrics_1, eval_metrics_2]:
    ...     eval_metrics.add(child_metric)
>>> eval_metrics.update(labels = labels, preds = predicts)
>>> print eval_metrics.get()
(['accuracy', 'f1'], [0.6666666666666666, 0.8])
"""
def __init__(self, **kwargs):
super(CompositeEvalMetric, self).__init__('composite')
try:
self.metrics = kwargs['metrics']
except KeyError:
self.metrics = []
def add(self, metric):
"""Adds a child metric.
Parameters
----------
metric
A metric instance.
"""
self.metrics.append(metric)
def get_metric(self, index):
"""Returns a child metric.
Parameters
----------
index : int
Index of child metric in the list of metrics.
"""
try:
return self.metrics[index]
except IndexError:
            raise ValueError("Metric index {} is out of range 0 to {}".format(
                index, len(self.metrics)))
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
for metric in self.metrics:
metric.update(labels, preds)
def reset(self):
"""Resets the internal evaluation result to initial state."""
try:
for metric in self.metrics:
metric.reset()
except AttributeError:
pass
def get(self):
"""Returns the current evaluation result.
Returns
-------
names : list of str
Name of the metrics.
values : list of float
Value of the evaluations.
"""
names = []
results = []
for metric in self.metrics:
result = metric.get()
names.append(result[0])
results.append(result[1])
return (names, results)
########################
# CLASSIFICATION METRICS
########################
class Accuracy(EvalMetric):
"""Computes accuracy classification score.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> acc = mx.metric.Accuracy()
>>> acc.update(preds = predicts, labels = labels)
>>> print acc.get()
('accuracy', 0.6666666666666666)
"""
def __init__(self):
super(Accuracy, self).__init__('accuracy')
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred_label in zip(labels, preds):
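            # Convert raw scores/probabilities to class indices when the
            # prediction shape does not already match the label shape.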
if pred_label.shape != label.shape:
pred_label = ndarray.argmax_channel(pred_label)
pred_label = pred_label.asnumpy().astype('int32')
label = label.asnumpy().astype('int32')
check_label_shapes(label, pred_label)
self.sum_metric += (pred_label.flat == label.flat).sum()
self.num_inst += len(pred_label.flat)
class TopKAccuracy(EvalMetric):
"""Computes top k predictions accuracy.
`TopKAccuracy` differs from Accuracy in that it considers the prediction
to be ``True`` as long as the ground truth label is in the top K
    predicted labels.
If `top_k` = ``1``, then `TopKAccuracy` is identical to `Accuracy`.
Parameters
----------
top_k : int
        Number of top predictions to consider; a sample counts as correct if
        its target label appears among them.
Examples
--------
>>> np.random.seed(999)
>>> top_k = 3
>>> labels = [mx.nd.array([2, 6, 9, 2, 3, 4, 7, 8, 9, 6])]
>>> predicts = [mx.nd.array(np.random.rand(10, 10))]
>>> acc = mx.metric.TopKAccuracy(top_k=top_k)
>>> acc.update(labels, predicts)
>>> print acc.get()
('top_k_accuracy', 0.3)
"""
def __init__(self, **kwargs):
super(TopKAccuracy, self).__init__('top_k_accuracy')
try:
self.top_k = kwargs['top_k']
except KeyError:
self.top_k = 1
assert(self.top_k > 1), 'Please use Accuracy if top_k is no more than 1'
self.name += '_%d' % self.top_k
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred_label in zip(labels, preds):
assert(len(pred_label.shape) <= 2), 'Predictions should be no more than 2 dims'
pred_label = numpy.argsort(pred_label.asnumpy().astype('float32'), axis=1)
label = label.asnumpy().astype('int32')
check_label_shapes(label, pred_label)
num_samples = pred_label.shape[0]
num_dims = len(pred_label.shape)
if num_dims == 1:
self.sum_metric += (pred_label.flat == label.flat).sum()
elif num_dims == 2:
num_classes = pred_label.shape[1]
top_k = min(num_classes, self.top_k)
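                # numpy.argsort sorts ascending, so the top-k predicted
                # classes are the last k columns of pred_label.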
for j in range(top_k):
self.sum_metric += (pred_label[:, num_classes - 1 - j].flat == label.flat).sum()
self.num_inst += num_samples
class F1(EvalMetric):
"""Computes the F1 score of a binary classification problem.
    The F1 score is the harmonic mean of precision and recall, where the best
    value is 1.0 and the worst value is 0.0. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
The formula for precision and recall is::
precision = true_positives / (true_positives + false_positives)
recall = true_positives / (true_positives + false_negatives)
.. note::
This F1 score only supports binary classification.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0., 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0., 1., 1.])]
>>> acc = mx.metric.F1()
>>> acc.update(preds = predicts, labels = labels)
>>> print acc.get()
('f1', 0.8)
"""
def __init__(self):
super(F1, self).__init__('f1')
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred in zip(labels, preds):
pred = pred.asnumpy()
label = label.asnumpy().astype('int32')
pred_label = numpy.argmax(pred, axis=1)
check_label_shapes(label, pred)
if len(numpy.unique(label)) > 2:
raise ValueError("F1 currently only supports binary classification.")
true_positives, false_positives, false_negatives = 0., 0., 0.
for y_pred, y_true in zip(pred_label, label):
if y_pred == 1 and y_true == 1:
true_positives += 1.
elif y_pred == 1 and y_true == 0:
false_positives += 1.
elif y_pred == 0 and y_true == 1:
false_negatives += 1.
if true_positives + false_positives > 0:
precision = true_positives / (true_positives + false_positives)
else:
precision = 0.
if true_positives + false_negatives > 0:
recall = true_positives / (true_positives + false_negatives)
else:
recall = 0.
if precision + recall > 0:
f1_score = 2 * precision * recall / (precision + recall)
else:
f1_score = 0.
self.sum_metric += f1_score
self.num_inst += 1
class Perplexity(EvalMetric):
"""Computes perplexity.
Perplexity is a measurement of how well a probability distribution
or model predicts a sample. A low perplexity indicates the model
is good at predicting the sample.
The perplexity of a model q is defined as
.. math::
b^{\\big(-\\frac{1}{N} \\sum_{i=1}^N \\log_b q(x_i) \\big)}
= \\exp \\big(-\\frac{1}{N} \\sum_{i=1}^N \\log q(x_i)\\big)
where we let `b = e`.
:math:`q(x_i)` is the predicted value of its ground truth
label on sample :math:`x_i`.
For example, we have three samples :math:`x_1, x_2, x_3` and their labels
are :math:`[0, 1, 1]`.
Suppose our model predicts :math:`q(x_1) = p(y_1 = 0 | x_1) = 0.3`
and :math:`q(x_2) = 1.0`,
:math:`q(x_3) = 0.6`. The perplexity of model q is
:math:`exp\\big(-(\\log 0.3 + \\log 1.0 + \\log 0.6) / 3\\big) = 1.77109762852`.
Parameters
----------
ignore_label : int or None
Index of invalid label to ignore when
        counting. Defaults to -1.
        If set to `None`, all entries are included.
axis : int (default -1)
        The axis of the predictions along which the softmax was computed.
        Defaults to the last axis.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> perp = mx.metric.Perplexity(ignore_label=None)
>>> perp.update(labels, predicts)
>>> print perp.get()
('Perplexity', 1.7710976285155853)
"""
def __init__(self, ignore_label, axis=-1):
super(Perplexity, self).__init__('Perplexity')
self.ignore_label = ignore_label
self.axis = axis
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
assert len(labels) == len(preds)
loss = 0.
num = 0
for label, pred in zip(labels, preds):
assert label.size == pred.size/pred.shape[-1], \
"shape mismatch: %s vs. %s"%(label.shape, pred.shape)
label = label.as_in_context(pred.context).reshape((label.size,))
pred = ndarray.pick(pred, label.astype(dtype='int32'), axis=self.axis)
if self.ignore_label is not None:
ignore = label == self.ignore_label
num -= ndarray.sum(ignore).asscalar()
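                # Set ignored entries to probability 1 so they contribute
                # log(1) = 0 to the loss.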
pred = pred*(1-ignore) + ignore
loss -= ndarray.sum(ndarray.log(ndarray.maximum(1e-10, pred))).asscalar()
num += pred.size
self.sum_metric += loss
self.num_inst += num
def get(self):
"""Returns the current evaluation result.
Returns
-------
Tuple of (str, float)
Representing name of the metric and evaluation result.
"""
return (self.name, math.exp(self.sum_metric/self.num_inst))
####################
# REGRESSION METRICS
####################
class MAE(EvalMetric):
"""Computes Mean Absolute Error (MAE) loss.
The mean absolute error is given by
.. math::
\\frac{\\sum_i^n |y_i - \\hat{y}_i|}{n}
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> mean_absolute_error = mx.metric.MAE()
>>> mean_absolute_error.update(labels = labels, preds = predicts)
>>> print mean_absolute_error.get()
('mae', 0.5)
"""
def __init__(self):
super(MAE, self).__init__('mae')
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
self.sum_metric += numpy.abs(label - pred).mean()
self.num_inst += 1 # numpy.prod(label.shape)
class MSE(EvalMetric):
"""Computes Mean Squared Error (MSE) loss.
The mean squared error is given by
.. math::
\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> mean_squared_error = mx.metric.MSE()
>>> mean_squared_error.update(labels = labels, preds = predicts)
>>> print mean_squared_error.get()
('mse', 0.375)
"""
def __init__(self):
super(MSE, self).__init__('mse')
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
self.sum_metric += ((label - pred)**2.0).mean()
self.num_inst += 1 # numpy.prod(label.shape)
class RMSE(EvalMetric):
"""Computes Root Mean Squred Error (RMSE) loss.
The root mean squared error is given by
.. math::
\\sqrt{\\frac{\\sum_i^n (y_i - \\hat{y}_i)^2}{n}}
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> root_mean_squared_error = mx.metric.RMSE()
>>> root_mean_squared_error.update(labels = labels, preds = predicts)
>>> print root_mean_squared_error.get()
('rmse', 0.612372457981)
"""
def __init__(self):
super(RMSE, self).__init__('rmse')
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
if len(label.shape) == 1:
label = label.reshape(label.shape[0], 1)
self.sum_metric += numpy.sqrt(((label - pred)**2.0).mean())
self.num_inst += 1
class CrossEntropy(EvalMetric):
"""Computes Cross Entropy loss.
The cross entropy is given by
.. math::
-y\\log \\hat{y} + (1-y)\\log (1-\\hat{y})
Parameters
----------
eps : float
        Cross Entropy loss is undefined when a predicted value is 0 or 1,
        so this small constant is added to the predicted values.
Examples
--------
>>> predicts = [mx.nd.array([[0.3, 0.7], [0, 1.], [0.4, 0.6]])]
>>> labels = [mx.nd.array([0, 1, 1])]
>>> ce = mx.metric.CrossEntropy()
>>> ce.update(labels, predicts)
>>> print ce.get()
('cross-entropy', 0.57159948348999023)
"""
def __init__(self, eps=1e-8):
super(CrossEntropy, self).__init__('cross-entropy')
self.eps = eps
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
check_label_shapes(labels, preds)
for label, pred in zip(labels, preds):
label = label.asnumpy()
pred = pred.asnumpy()
label = label.ravel()
assert label.shape[0] == pred.shape[0]
prob = pred[numpy.arange(label.shape[0]), numpy.int64(label)]
self.sum_metric += (-numpy.log(prob + self.eps)).sum()
self.num_inst += label.shape[0]
class Torch(EvalMetric):
"""Dummy metric for torch criterions."""
def __init__(self, name='torch'):
super(Torch, self).__init__(name)
def update(self, _, preds):
for pred in preds:
self.sum_metric += pred.asnumpy().mean()
self.num_inst += 1
class Caffe(Torch):
"""Dummy metric for caffe criterions"""
def __init__(self):
super(Caffe, self).__init__('caffe')
class CustomMetric(EvalMetric):
"""Computes a customized evaluation metric.
The `feval` function can return a `tuple` of (sum_metric, num_inst) or return
an `int` sum_metric.
Parameters
----------
feval : callable(label, pred)
Customized evaluation function.
name : str, optional
The name of the metric. (the default is None).
allow_extra_outputs : bool, optional
If true, the prediction outputs can have extra outputs.
This is useful in RNN, where the states are also produced
in outputs for forwarding. (the default is False).
Examples
--------
>>> predicts = [mx.nd.array(np.array([3, -0.5, 2, 7]).reshape(4,1))]
>>> labels = [mx.nd.array(np.array([2.5, 0.0, 2, 8]).reshape(4,1))]
>>> feval = lambda x, y : (x + y).mean()
>>> eval_metrics = mx.metric.CustomMetric(feval=feval)
>>> eval_metrics.update(labels, predicts)
>>> print eval_metrics.get()
('custom(<lambda>)', 6.0)
"""
def __init__(self, feval, name=None, allow_extra_outputs=False):
if name is None:
name = feval.__name__
if name.find('<') != -1:
name = 'custom(%s)' % name
super(CustomMetric, self).__init__(name)
self._feval = feval
self._allow_extra_outputs = allow_extra_outputs
def update(self, labels, preds):
"""Updates the internal evaluation result.
Parameters
----------
labels : list of `NDArray`
The labels of the data.
preds : list of `NDArray`
Predicted values.
"""
if not self._allow_extra_outputs:
check_label_shapes(labels, preds)
for pred, label in zip(preds, labels):
label = label.asnumpy()
pred = pred.asnumpy()
reval = self._feval(label, pred)
if isinstance(reval, tuple):
(sum_metric, num_inst) = reval
self.sum_metric += sum_metric
self.num_inst += num_inst
else:
self.sum_metric += reval
self.num_inst += 1
# pylint: disable=invalid-name
def np(numpy_feval, name=None, allow_extra_outputs=False):
"""Creates a custom evaluation metric that receives its inputs as numpy arrays.
Parameters
----------
numpy_feval : callable(label, pred)
Custom evaluation function that receives labels and predictions for a minibatch
as numpy arrays and returns the corresponding custom metric as a floating point number.
name : str, optional
Name of the custom metric.
allow_extra_outputs : bool, optional
Whether prediction output is allowed to have extra outputs. This is useful in cases
like RNN where states are also part of output which can then be fed back to the RNN
in the next step. By default, extra outputs are not allowed.
Returns
-------
    CustomMetric
        Custom metric that evaluates `numpy_feval` on numpy-converted labels and predictions.
Example
-------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label-pred))
...
>>> metric = mx.metric.np(custom_metric)
"""
def feval(label, pred):
"""Internal eval function."""
return numpy_feval(label, pred)
feval.__name__ = numpy_feval.__name__
return CustomMetric(feval, name, allow_extra_outputs)
# pylint: enable=invalid-name
def create(metric, **kwargs):
"""Creates evaluation metric from metric names or instances of EvalMetric
or a custom metric function.
Parameters
----------
metric : str or callable
Specifies the metric to create.
This argument must be one of the below:
- Name of a metric.
- An instance of `EvalMetric`.
- A list, each element of which is a metric or a metric name.
- An evaluation function that computes custom metric for a given batch of
labels and predictions.
Examples
--------
>>> def custom_metric(label, pred):
... return np.mean(np.abs(label - pred))
...
>>> metric1 = mx.metric.create('acc')
>>> metric2 = mx.metric.create(custom_metric)
>>> metric3 = mx.metric.create([metric1, metric2, 'rmse'])
"""
if callable(metric):
return CustomMetric(metric)
elif isinstance(metric, EvalMetric):
return metric
elif isinstance(metric, list):
composite_metric = CompositeEvalMetric()
for child_metric in metric:
composite_metric.add(create(child_metric, **kwargs))
return composite_metric
metrics = {
'acc': Accuracy,
'accuracy': Accuracy,
'ce': CrossEntropy,
'f1': F1,
'mae': MAE,
'mse': MSE,
'rmse': RMSE,
'top_k_accuracy': TopKAccuracy
}
try:
return metrics[metric.lower()](**kwargs)
    except (KeyError, AttributeError):
raise ValueError("Metric must be either callable or in {}".format(
metrics.keys()))
|
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2020, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigip_ssl_key_cert
short_description: Import/Delete SSL keys and certs from BIG-IP
description:
- This module imports/deletes SSL keys and certificates on a BIG-IP.
Keys can be imported from key files on the local disk, in PEM format.
Certificates can be imported from certificate and key files on the local
disk, in PEM format.
version_added: "1.6.0"
options:
key_content:
description:
- Sets the contents of a key directly to the specified value. This is
used with lookup plugins, or for anything with formatting or templating.
This must be provided when C(state) is C(present).
type: str
state:
description:
- When C(present), ensures the key and/or cert is uploaded to the
device. When C(absent), ensures the key and/or cert is removed
from the device. If the key and/or cert is currently in use, the module
will not be able to remove the key.
type: str
choices:
- present
- absent
default: present
key_name:
description:
- The name of the key.
type: str
passphrase:
description:
- Passphrase on key.
type: str
cert_content:
description:
- Sets the contents of a certificate directly to the specified value.
        This is used with lookup plugins or for anything with formatting or
        templating.
      - C(cert_content) must be provided when C(state) is C(present).
type: str
cert_name:
description:
- SSL Certificate Name. This is the cert name used when importing a certificate
into the BIG-IP. It also determines the filenames of the objects on the LTM.
type: str
issuer_cert:
description:
- Issuer certificate used for OCSP monitoring.
- This parameter is only valid on versions of BIG-IP 13.0.0 or above.
type: str
partition:
description:
- Device partition to manage resources on.
type: str
default: Common
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Nitin Khanna (@nitinthewiz)
'''
EXAMPLES = r'''
- name: Import both key and cert
bigip_ssl_key_cert:
key_content: "{{ lookup('file', 'key.pem') }}"
key_name: cert1
cert_content: "{{ lookup('file', 'cert.pem') }}"
cert_name: cert1
state: present
provider:
password: secret
server: lb.mydomain.com
user: admin
delegate_to: localhost
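# A minimal removal task (illustrative, reusing the names from the import above).
- name: Remove both key and cert
  bigip_ssl_key_cert:
    key_name: cert1
    cert_name: cert1
    state: absent
    provider:
      password: secret
      server: lb.mydomain.com
      user: admin
  delegate_to: localhost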
'''
RETURN = r'''
# only common fields returned
'''
import hashlib
import os
import re
from datetime import datetime
from ansible.module_utils.basic import (
AnsibleModule, env_fallback
)
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, transform_name,
f5_argument_spec, fq_name, merge_two_dicts
)
from ..module_utils.icontrol import (
TransactionContextManager, upload_file, tmos_version
)
from ..module_utils.teem import send_teem
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
class Parameters(AnsibleF5Parameters):
download_path = '/var/config/rest/downloads'
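    # Maps attribute names returned by the BIG-IP REST API to the snake_case
    # names used by this module.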
api_map = {
'sourcePath': 'source_path',
'issuerCert': 'issuer_cert',
}
api_attributes = [
'passphrase',
'sourcePath',
'issuerCert',
]
returnables = [
'checksum',
'source_path',
'issuer_cert',
]
updatables = [
'key_checksum',
'cert_checksum',
'content',
'issuer_cert',
'source_path',
]
class ApiParameters(Parameters):
@property
def key_filename(self):
if self._values['name'] is None:
return None
if not self._values['name'].endswith('.key'):
return None
return self._values['name']
@property
def key_source_path(self):
if self.key_filename is None:
return None
if self._values['key_source_path'] is None:
return None
else:
return self._values['key_source_path']
@property
def cert_filename(self):
if self._values['name'] is None:
return None
if not self._values['name'].endswith('.crt'):
return None
return self._values['name']
@property
def cert_source_path(self):
if self.cert_filename is None:
return None
if self._values['cert_source_path'] is None:
return None
else:
return self._values['cert_source_path']
@property
def key_checksum(self):
if self._values['key_checksum'] is None:
return None
pattern = r'SHA1:\d+:(?P<value>[\w+]{40})'
matches = re.match(pattern, self._values['key_checksum'])
if matches:
return matches.group('value')
@property
def cert_checksum(self):
if self._values['cert_checksum'] is None:
return None
pattern = r'SHA1:\d+:(?P<value>[\w+]{40})'
matches = re.match(pattern, self._values['cert_checksum'])
if matches:
return matches.group('value')
class ModuleParameters(Parameters):
def _get_hash(self, content):
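        # Hash the PEM content in 1 KB chunks with SHA-1, matching the
        # SHA1 checksums that the BIG-IP REST API reports for stored files.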
k = hashlib.sha1()
s = StringIO(content)
while True:
data = s.read(1024)
if not data:
break
k.update(data.encode('utf-8'))
return k.hexdigest()
@property
def issuer_cert(self):
if self._values['issuer_cert'] is None:
return None
name = fq_name(self.partition, self._values['issuer_cert'])
if name.endswith('.crt'):
return name
else:
return name + '.crt'
@property
def key_filename(self):
if self.key_name is None:
return None
if self.key_name.endswith('.key'):
return self.key_name
else:
return self.key_name + '.key'
@property
def cert_filename(self):
if self.cert_name is None:
return None
if self.cert_name.endswith('.crt'):
return self.cert_name
else:
return self.cert_name + '.crt'
@property
def key_checksum(self):
if self.key_content is None:
return None
return self._get_hash(self.key_content)
@property
def cert_checksum(self):
if self.cert_content is None:
return None
return self._get_hash(self.cert_content)
@property
def key_source_path(self):
result = 'file://' + os.path.join(
self.download_path,
self.key_filename
)
return result
@property
def cert_source_path(self):
result = 'file://' + os.path.join(
self.download_path,
self.cert_filename
)
return result
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
return self.__default(param)
def __default(self, param):
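        # Generic comparison: report the desired value whenever it differs
        # from, or is absent in, the current device configuration.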
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
@property
def key_checksum(self):
if self.want.key_checksum is None:
return None
if self.want.key_checksum != self.have.key_checksum:
return self.want.key_checksum
@property
def key_source_path(self):
if self.want.key_source_path is None:
return None
if self.want.key_source_path == self.have.key_source_path:
if self.key_checksum:
return self.want.key_source_path
if self.want.key_source_path != self.have.key_source_path:
return self.want.key_source_path
@property
def cert_source_path(self):
if self.want.source_path is None:
return None
if self.want.source_path == self.have.source_path:
if self.cert_content:
return self.want.source_path
if self.want.source_path != self.have.source_path:
return self.want.source_path
@property
def cert_content(self):
if self.want.cert_checksum != self.have.checksum:
result = dict(
checksum=self.want.cert_checksum,
content=self.want.cert_content
)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = F5RestClient(**self.module.params)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def exec_module(self):
start = datetime.now().isoformat()
version = tmos_version(self.client)
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
send_teem(start, self.module, version)
return result
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def absent(self):
if self.exists():
return self.remove()
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
if self.want.key_filename:
self.remove_uploaded_file_from_device(self.want.key_filename)
if self.want.cert_filename:
self.remove_uploaded_file_from_device(self.want.cert_filename)
return True
def remove_uploaded_file_from_device(self, name):
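        # Remove the temporary file uploaded to /var/config/rest/downloads by
        # running the unix-rm util command on the device.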
filepath = '/var/config/rest/downloads/{0}'.format(name)
params = {
"command": "run",
"utilCmdArgs": filepath
}
uri = "https://{0}:{1}/mgmt/tm/util/unix-rm".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
return True
raise F5ModuleError(resp.content)
def exists(self):
# Can't use TransactionContextManager here because
# it expects end result code to be 200 or so. 404 causes
# TransactionContextManager to fail.
if self.want.key_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
# if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
# return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
if self.want.cert_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
# if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
# return True
errors = [401, 403, 409, 500, 501, 502, 503, 504]
if resp.status in errors or 'code' in response and response['code'] in errors:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return True
def upload_file_to_device(self, content, name):
url = 'https://{0}:{1}/mgmt/shared/file-transfer/uploads'.format(
self.client.provider['server'],
self.client.provider['server_port']
)
try:
upload_file(self.client, url, content, name)
except F5ModuleError:
raise F5ModuleError(
"Failed to upload the file."
)
def _prepare_links(self):
# this is to ensure no duplicates are in the provided collection
links = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
links.append(key_link)
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
links.append(cert_link)
return links
def _prepare_links_for_update(self, params_dict):
# this is to ensure no duplicates are in the provided collection
links_and_params = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
key_params_dict = params_dict.copy()
key_params_dict['sourcePath'] = self.want.key_source_path
links_and_params.append({'link': key_link, 'params': key_params_dict})
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
cert_params_dict = params_dict.copy()
cert_params_dict['sourcePath'] = self.want.cert_source_path
links_and_params.append({'link': cert_link, 'params': cert_params_dict})
return links_and_params
def _prepare_links_for_create(self, params_dict):
# this is to ensure no duplicates are in the provided collection
links_and_params = list()
if self.want.key_name:
key_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
key_params_dict = params_dict.copy()
key_params_dict['name'] = self.want.key_filename
key_params_dict['sourcePath'] = self.want.key_source_path
links_and_params.append({'link': key_link, 'params': key_params_dict})
if self.want.cert_name:
cert_link = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
cert_params_dict = params_dict.copy()
cert_params_dict['name'] = self.want.cert_filename
cert_params_dict['sourcePath'] = self.want.cert_source_path
links_and_params.append({'link': cert_link, 'params': cert_params_dict})
return links_and_params
def create_on_device(self):
params = self.changes.api_params()
params['partition'] = self.want.partition
# params['name'] = self.want.name
links_and_params = self._prepare_links_for_create(params)
if self.want.key_name:
key_content = StringIO(self.want.key_content)
self.upload_file_to_device(key_content, self.want.key_filename)
if self.want.cert_name:
cert_content = StringIO(self.want.cert_content)
self.upload_file_to_device(cert_content, self.want.cert_filename)
with TransactionContextManager(self.client) as transact:
for link in links_and_params:
resp = transact.api.post(link['link'], json=link['params'])
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and
response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
# This needs to be done because of the way that BIG-IP creates certificates.
#
# The extra params (such as OCSP and issuer stuff) are not available in the
# payload. In a nutshell, the available resource attributes *change* after
# a create so that *more* are available.
if self.want.cert_name:
params = self.want.api_params()
if params:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
resp = self.client.api.put(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
return True
def update_on_device(self):
params = self.changes.api_params()
if self.want.key_name:
key_content = StringIO(self.want.key_content)
self.upload_file_to_device(key_content, self.want.key_filename)
if self.want.cert_name:
cert_content = StringIO(self.want.cert_content)
self.upload_file_to_device(cert_content, self.want.cert_filename)
links_and_params = self._prepare_links_for_update(params)
with TransactionContextManager(self.client) as transact:
for link in links_and_params:
resp = transact.api.patch(link['link'], json=link['params'])
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201] or 'code' in response and
response['code'] in [200, 201]):
raise F5ModuleError(resp.content)
return True
def remove_from_device(self):
links = self._prepare_links()
with TransactionContextManager(self.client) as transact:
for link in links:
resp = transact.api.delete(link)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if not (resp.status in [200, 201]):
raise F5ModuleError(resp.content)
return True
def read_current_from_device(self):
final_response = {}
# TransactionContextManager cannot be used for reading, for
# whatever reason
if self.want.key_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-key/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.key_filename)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
response['key_checksum'] = response['checksum']
response['key_source_path'] = response['sourcePath']
final_response = merge_two_dicts(final_response, response)
else:
raise F5ModuleError(resp.content)
if self.want.cert_name:
uri = "https://{0}:{1}/mgmt/tm/sys/file/ssl-cert/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.cert_filename)
)
query = '?expandSubcollections=true'
resp = self.client.api.get(uri + query)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status in [200, 201] or 'code' in response and response['code'] in [200, 201]:
response['cert_checksum'] = response['checksum']
response['cert_source_path'] = response['sourcePath']
final_response = merge_two_dicts(final_response, response)
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=final_response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
key_name=dict(),
key_content=dict(),
passphrase=dict(
no_log=True
),
cert_name=dict(),
cert_content=dict(),
issuer_cert=dict(),
state=dict(
required=False,
default='present',
choices=['absent', 'present']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
try:
mm = ModuleManager(module=module)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
|
|
from ctypes import *
import unittest
# IMPORTANT INFO:
#
# Consider this call:
# func.restype = c_char_p
# func(c_char_p("123"))
# It returns
# "123"
#
# WHY IS THIS SO?
#
# argument tuple (c_char_p("123"), ) is destroyed after the function
# func is called, but NOT before the result is actually built.
#
# If the arglist were destroyed BEFORE the result has been built,
# the c_char_p("123") object would already have a zero refcount,
# and the pointer passed to (and returned by) the function would
# probably point to deallocated space.
#
# In this case, there would have to be an additional reference to the argument...
import _ctypes_test
testdll = CDLL(_ctypes_test.__file__)
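# A minimal sketch of the behaviour described above (assuming, as the tests
# below do, that _testfunc_p_p simply returns the pointer it receives):
#
#     func = testdll._testfunc_p_p
#     func.restype = c_char_p
#     func.argtypes = c_char_p,
#     assert func(c_char_p("123")) == "123"   # the temporary argument is still alive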
# Return machine address `a` as a (possibly long) non-negative integer.
# Starting with Python 2.5, id(anything) is always non-negative, and
# the ctypes addressof() inherits that via PyLong_FromVoidPtr().
def positive_address(a):
if a >= 0:
return a
# View the bits in `a` as unsigned instead.
import struct
num_bits = struct.calcsize("P") * 8 # num bits in native machine address
a += 1L << num_bits
assert a >= 0
return a
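# For example, on a 64-bit build an addressof() result reported at the C level
# as -1 would come back from positive_address() as 2**64 - 1 (a sketch of the
# unsigned reinterpretation above; exact values depend on the pointer size).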
def c_wbuffer(init):
n = len(init) + 1
return (c_wchar * n)(*init)
class CharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_long
func.argtypes = None
def test_paramflags(self):
# function returns c_void_p result,
# and has a required parameter named 'input'
prototype = CFUNCTYPE(c_void_p, c_void_p)
func = prototype(("_testfunc_p_p", testdll),
((1, "input"),))
try:
func()
except TypeError, details:
self.failUnlessEqual(str(details), "required argument 'input' missing")
else:
self.fail("TypeError not raised")
self.failUnlessEqual(func(None), None)
self.failUnlessEqual(func(input=None), None)
def test_int_pointer_arg(self):
func = testdll._testfunc_p_p
func.restype = c_long
self.failUnlessEqual(0, func(0))
ci = c_int(0)
func.argtypes = POINTER(c_int),
self.failUnlessEqual(positive_address(addressof(ci)),
positive_address(func(byref(ci))))
func.argtypes = c_char_p,
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_short),
self.assertRaises(ArgumentError, func, byref(ci))
func.argtypes = POINTER(c_double),
self.assertRaises(ArgumentError, func, byref(ci))
def test_POINTER_c_char_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = POINTER(c_char),
self.failUnlessEqual(None, func(None))
self.failUnlessEqual("123", func("123"))
self.failUnlessEqual(None, func(c_char_p(None)))
self.failUnlessEqual("123", func(c_char_p("123")))
self.failUnlessEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.failUnlessEqual("a", func(pointer(ca))[0])
self.failUnlessEqual("a", func(byref(ca))[0])
def test_c_char_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_char_p,
self.failUnlessEqual(None, func(None))
self.failUnlessEqual("123", func("123"))
self.failUnlessEqual(None, func(c_char_p(None)))
self.failUnlessEqual("123", func(c_char_p("123")))
self.failUnlessEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.failUnlessEqual("a", func(pointer(ca))[0])
self.failUnlessEqual("a", func(byref(ca))[0])
def test_c_void_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_char_p
func.argtypes = c_void_p,
self.failUnlessEqual(None, func(None))
self.failUnlessEqual("123", func("123"))
self.failUnlessEqual("123", func(c_char_p("123")))
self.failUnlessEqual(None, func(c_char_p(None)))
self.failUnlessEqual("123", func(c_buffer("123")))
ca = c_char("a")
self.failUnlessEqual("a", func(pointer(ca))[0])
self.failUnlessEqual("a", func(byref(ca))[0])
func(byref(c_int()))
func(pointer(c_int()))
func((c_int * 3)())
try:
func.restype = c_wchar_p
except NameError:
pass
else:
self.failUnlessEqual(None, func(c_wchar_p(None)))
self.failUnlessEqual(u"123", func(c_wchar_p(u"123")))
def test_instance(self):
func = testdll._testfunc_p_p
func.restype = c_void_p
class X:
_as_parameter_ = None
func.argtypes = c_void_p,
self.failUnlessEqual(None, func(X()))
func.argtypes = None
self.failUnlessEqual(None, func(X()))
try:
c_wchar
except NameError:
pass
else:
class WCharPointersTestCase(unittest.TestCase):
def setUp(self):
func = testdll._testfunc_p_p
func.restype = c_int
func.argtypes = None
def test_POINTER_c_wchar_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = POINTER(c_wchar),
self.failUnlessEqual(None, func(None))
self.failUnlessEqual(u"123", func(u"123"))
self.failUnlessEqual(None, func(c_wchar_p(None)))
self.failUnlessEqual(u"123", func(c_wchar_p(u"123")))
self.failUnlessEqual(u"123", func(c_wbuffer(u"123")))
ca = c_wchar("a")
self.failUnlessEqual(u"a", func(pointer(ca))[0])
self.failUnlessEqual(u"a", func(byref(ca))[0])
def test_c_wchar_p_arg(self):
func = testdll._testfunc_p_p
func.restype = c_wchar_p
func.argtypes = c_wchar_p,
c_wchar_p.from_param(u"123")
self.failUnlessEqual(None, func(None))
self.failUnlessEqual("123", func(u"123"))
self.failUnlessEqual(None, func(c_wchar_p(None)))
self.failUnlessEqual("123", func(c_wchar_p("123")))
# XXX Currently, these raise TypeErrors, although they shouldn't:
self.failUnlessEqual("123", func(c_wbuffer("123")))
ca = c_wchar("a")
self.failUnlessEqual("a", func(pointer(ca))[0])
self.failUnlessEqual("a", func(byref(ca))[0])
class ArrayTest(unittest.TestCase):
def test(self):
func = testdll._testfunc_ai8
func.restype = POINTER(c_int)
func.argtypes = c_int * 8,
func((c_int * 8)(1, 2, 3, 4, 5, 6, 7, 8))
# This did crash before:
def func(): pass
CFUNCTYPE(None, c_int * 3)(func)
################################################################
if __name__ == '__main__':
unittest.main()
|
|
#!/usr/bin/env python
#
# Copyright 2013 Markus Gronholm <[email protected]> / Alshain Oy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import wx
import wx.stc
import sys
import pprint
import math
def read_file( fn ):
txt = ""
with open( fn, 'rb' ) as handle:
txt = handle.read()
return txt
def read_ladder( txt ):
lines = txt.strip().splitlines()
rungs = {}
current = None
for line in lines:
line = line.strip()
if len( line ) < 1:
continue
if line.startswith( "rung" ):
current = line[:-1]
rungs[current] = []
continue
if " " in line:
(a, b) = line.split(" ")
rungs[current].append( (a,b) )
else:
rungs[current].append( (line, ) )
return rungs
def stack_branches_old( rung ):
print rung
main = []
branches = {}
key = None
current = []
for entry in rung:
oper = entry[0]
if oper not in ["BRN", "BRX"]:
if not key:
main.append( entry )
else:
current.append( entry )
else:
param = entry[1]
if oper == "BRN":
if param != key:
current.append( entry )
else:
current.append( entry )
branches[key].append( current )
current = []
key = None
else:
if param not in branches:
branches[param] = []
main.append( ("BRANCH", param) )
current.append( entry )
key = param
else:
key = param
current.append( entry )
print branches, current
print ""
#pprint.pprint( main )
for i in range( len( main ) ):
entry = main[i]
if entry[0] == "BRANCH":
lines = branches[entry[1]]
out = []
for line in lines:
#out.append( stack_branches( line ) )
print "line", line
tmp = [line[0]]
tmp.extend( stack_branches( line[1:-1] ) )
tmp.append( line[-1] )
out.append( tmp )
main[i] = out
return main
def stack_branches( rung ):
branches = {}
main = []
current = []
key = None
for entry in rung:
oper = entry[0]
if oper == "BRX":
if not key:
key = entry[1]
current.append( entry )
if len( main ) > 0:
if main[-1] != ("BRANCH", key ):
main.append( ("BRANCH", key ) )
else:
main.append( ("BRANCH", key ) )
if key not in branches:
branches[key] = []
else:
current.append( entry )
elif oper == "BRN":
if entry[1] == key:
current.append( entry )
branches[key].append( current )
current = []
key = None
else:
current.append( entry )
else:
if key:
current.append( entry )
else:
main.append( entry )
out = []
for entry in main:
if entry[0] == "BRANCH":
branch = branches[entry[1]]
lines = []
for line in branch:
brx = line[0]
brn = line[-1]
middle = line[1:-1]
tmp = [brx]
tmp.extend( stack_branches( middle ) )
tmp.append( brn )
lines.append( tmp )
out.append( lines )
else:
out.append( entry )
return out
class MainWindow( wx.Frame ):
def __init__( self, parent, title, ladder, txt, filename ):
super( MainWindow, self ).__init__( parent, title = title, size = (800, 600) )
self.ladder = ladder
self.txt = txt
self.filename = filename
self.font = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, 'Courier 10 Pitch')
self.Bind( wx.EVT_PAINT, self.onPaint )
menubar = wx.MenuBar()
fileMenu = wx.Menu()
quitItem = fileMenu.Append( wx.ID_EXIT, "&Quit", "Quit Application" )
menubar.Append( fileMenu, "&File" )
self.SetMenuBar( menubar )
self.Bind( wx.EVT_MENU, self.onQuit, quitItem )
self.Show()
def onQuit( self, event ):
self.Close()
def draw_arc( self, dc, angle0, angle1, r, cx, cy, N = 8 ):
theta0 = angle0 * math.pi/180.0
theta1 = angle1 * math.pi/180.0
dphi = (theta1-theta0)/(N-1)
for i in range( N-1 ):
phi0 = theta0 + dphi*i
phi1 = theta0 + dphi*(i+1)
x0 = int( r*math.cos( phi0 ) + cx )
y0 = int( r*math.sin( phi0 ) + cy )
x1 = int( r*math.cos( phi1 ) + cx )
y1 = int( r*math.sin( phi1 ) + cy )
dc.DrawLine( x0, y0, x1, y1 )
def draw_LD( self, dc, name, x, y ):
dc.DrawLine( x, y, x+15, y )
dc.DrawLine( x+15, y-10, x+15, y+10 )
dc.DrawLine( x+25, y-10, x+25, y+10 )
dc.DrawLine( x+25, y, x+40, y )
dc.DrawText( name, x+10, y - 25 )
def draw_LDN( self, dc, name, x, y ):
dc.DrawLine( x, y, x+15, y )
dc.DrawLine( x+15, y-10, x+15, y+10 )
dc.DrawLine( x+25, y-10, x+25, y+10 )
dc.DrawLine( x+25, y, x+40, y )
dc.DrawText( name, x+10, y - 25 )
dc.DrawLine( x+15, y+10, x+25, y-10 )
def draw_A( self, dc, x, y ):
dc.DrawLine( x, y, x+40, y )
def draw_BRX( self, dc, first, id, x, y ):
if first:
dc.DrawLine( x, y, x+40, y )
#dc.DrawLine( x, y, x, y+40 )
dc.DrawText( str(id), x+3, y )
else:
dc.DrawLine( x, y, x+40, y )
def draw_BRN( self, dc, first, x, y ):
if first:
dc.DrawLine( x, y, x+40, y )
#dc.DrawLine( x, y, x, y+40 )
else:
#dc.DrawLine( x, y, x+40, y )
pass
def draw_ST( self, dc, name, x, y ):
dc.DrawLine( x, y, x+12, y )
dc.DrawLine( x+28, y, x+40, y )
dc.DrawText( name, x+10, y - 25 )
#dc.DrawArc( x+10, y-10, x+10, y+10, x+20, y )
self.draw_arc( dc, 180+20, 180-20, 30, x+42, y )
self.draw_arc( dc, 180+20+180, 180-20+180, 30, x-2, y )
def draw_STN( self, dc, name, x, y ):
dc.DrawLine( x, y, x+12, y )
dc.DrawLine( x+28, y, x+40, y )
dc.DrawText( name, x+10, y - 25 )
#dc.DrawArc( x+10, y-10, x+10, y+10, x+20, y )
self.draw_arc( dc, 180+20, 180-20, 30, x+42, y )
self.draw_arc( dc, 180+20+180, 180-20+180, 30, x-2, y )
dc.DrawLine( x+14, y+10, x+26, y-10 )
def draw_rung( self, dc, rung, old_x, old_y, first = True ):
x = old_x
y = old_y
max_y = y
end_x = old_x
end_y = old_y
for entry in rung:
if type( entry ) == tuple:
if entry[0] == "LD":
self.draw_LD( dc, entry[1], x, y )
elif entry[0] == "LDN":
self.draw_LDN( dc, entry[1], x, y )
elif entry[0] == "A":
self.draw_A( dc, x, y )
elif entry[0] == "BRX":
self.draw_BRX( dc, first, entry[1], x, y )
elif entry[0] == "BRN":
self.draw_BRN( dc, first, x, y )
elif entry[0] == "ST":
self.draw_ST( dc, entry[1], x, y )
elif entry[0] == "STN":
self.draw_STN( dc, entry[1], x, y )
else:
dc.DrawText( entry[0], x, y )
x += 40
end_y = y
else:
mx = []
my = []
mye = []
ly = y
lfirst = True
for e in entry:
kx, ky, ex, ey = self.draw_rung( dc, e, x, ly, lfirst )
mx.append( kx )
my.append( ky )
mye.append( ey )
ly = ky + 40
lfirst = False
#print ly
end_y = ly - 40
maxx = max(mx)
maxy = max(mye)
for i in range( len( mx ) ):
if my[i] < maxy:
dc.DrawLine( x, y, x, maxy )
dc.DrawLine( maxx-40, y, maxx-40, maxy )
if mx[i] < maxx:
dc.DrawLine( mx[i]-40, mye[i], maxx, mye[i] )
else:
if mx[i] < maxx:
dc.DrawLine( mx[i]-40, mye[i], maxx-40, mye[i] )
x = max( mx ) #+ 40
if max(my) > max_y:
max_y = max(my)
end_x = x
return x, max_y, end_x, end_y
def onPaint( self, event ):
dc = wx.PaintDC( self )
dc.SetFont( self.font )
x = 40
y = 40
dc.SetPen( wx.Pen('#000000') )
rungs = self.ladder.keys()
rungs.sort()
for key in rungs:
dc.DrawLine( 30, y, 40, y )
nx, ny, ex, ey = self.draw_rung( dc, stack_branches( self.ladder[key] ), x, y )
y = ny+80
dc.DrawLine( 30, 10, 30, y )
class EditorWindow( wx.Frame ):
def __init__( self, parent, title, main ):
super( EditorWindow, self ).__init__( parent, title = title, size = (300, 600) )
self.main = main
#self.font = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, 'Monospace')
self.font = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_NORMAL, False, 'Monospace')
self.font2 = wx.Font(8, wx.FONTFAMILY_DEFAULT, wx.FONTSTYLE_NORMAL, wx.FONTWEIGHT_BOLD, False, 'Monospace')
self.init_ui()
self.init_menus()
dw, dh = wx.DisplaySize()
w, h = self.GetSize()
self.SetPosition( (dw-w-20, h) )
self.Show()
def init_menus( self ):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
editMenu = wx.Menu()
insertMenu = wx.Menu()
testtItem = wx.MenuItem( fileMenu, wx.NewId(), "&Refresh\tCtrl+R" )
fileMenu.AppendItem( testtItem )
saveItem = wx.MenuItem( fileMenu, wx.NewId(), "&Save\tCtrl+S" )
fileMenu.AppendItem( saveItem )
quitItem = fileMenu.Append( wx.ID_EXIT, "&Quit", "Quit Application" )
menubar.Append( fileMenu, "&File" )
undoItem = wx.MenuItem( editMenu, wx.ID_UNDO, "&Undo\tCtrl+Z" )
editMenu.AppendItem( undoItem )
redoItem = wx.MenuItem( editMenu, wx.ID_REDO, "&Redo\tCtrl+Y" )
editMenu.AppendItem( redoItem )
menubar.Append( editMenu, "&Edit" )
branchItem = wx.MenuItem( insertMenu, wx.ID_ANY, "&Branch\tCtrl+B" )
insertMenu.AppendItem( branchItem )
menubar.Append( insertMenu, "&Insert" )
self.SetMenuBar( menubar )
self.Bind( wx.EVT_MENU, self.onQuit, quitItem )
self.Bind( wx.EVT_MENU, self.onDemo, testtItem)
self.Bind( wx.EVT_MENU, self.onUndo, undoItem)
self.Bind( wx.EVT_MENU, self.onRedo, redoItem)
self.Bind( wx.EVT_MENU, self.onSave, saveItem)
self.Bind( wx.EVT_MENU, self.onInsertBranch, branchItem)
def onQuit( self, event ):
self.Close()
def onDemo( self, event ):
value = self.text.GetTextUTF8()
self.main.txt = value
self.main.ladder = read_ladder( self.main.txt )
self.main.Refresh()
def onUndo( self, event ):
self.text.Undo()
def onRedo( self, event ):
self.text.Redo()
def onInsertBranch( self, event ):
pos = self.text.GetCurrentPos()
lpos = self.text.GetCurrentLine()
txt = self.text.GetTextUTF8()
lines = txt.splitlines()
if lpos < 1 or pos < 1:
return None
indent = False
if txt[pos-1] != "\n":
indent = True
line = lines[lpos-1]
line = line.strip()
if line.startswith("BRN"):
parts = line.split()
dtab = len(lines[lpos-1]) - len( line )
if not indent:
out = "\t"*dtab + "BRX %s\n"%parts[1] + "\t"*(dtab+1) + "\n" + "\t"*dtab + "BRN %s\n"%parts[1]
else:
out = "BRX %s\n"%parts[1] + "\t"*(dtab+1) + "\n" + "\t"*dtab + "BRN %s\n"%parts[1]
self.text.InsertText( pos, out )
self.text.LineDown()
self.text.LineEnd()
else:
rstart = lpos
cnt = 1
while not lines[rstart].strip().startswith( "rung" ):
if lines[rstart].strip().startswith( "BRX" ):
cnt += 1
elif lines[rstart].strip().startswith( "BRN" ):
cnt -= 1
rstart -= 1
rstop = lpos
while rstop < len(lines) and not lines[rstop].strip().startswith( "rung" ):
rstop += 1
rng = lines[rstart:rstop]
branch_id = -1
for x in rng:
x = x.strip()
if x.startswith( "BRN" ):
parts = x.split()
if int(parts[1]) > branch_id:
branch_id = int(parts[1])
branch_id += 1
if indent:
out = "BRX %i\n"%branch_id + "\t"*(cnt+1) + "\n" + "\t"*cnt + "BRN %i\n"%branch_id
else:
out = "\t"*cnt + "BRX %i\n"%branch_id + "\t"*(cnt+1) + "\n" + "\t"*cnt + "BRN %i\n"%branch_id
self.text.InsertText( pos, out )
self.text.LineDown()
self.text.LineEnd()
def onSave( self, event ):
with open( self.main.filename, 'w' ) as handle:
handle.write( self.text.GetTextUTF8() )
def init_ui( self ):
panel = wx.Panel( self, -1 )
vbox = wx.BoxSizer( wx.VERTICAL )
hbox = wx.BoxSizer( wx.HORIZONTAL )
#self.text = wx.TextCtrl( panel, style = wx.TE_MULTILINE, pos = (10, 10), size = (280, 540) )
self.text = wx.stc.StyledTextCtrl( panel, style = wx.TE_MULTILINE, pos = (10, 10), size = (280, 540) )
faces = {"font": self.font2.GetFaceName(), "size": self.font2.GetPointSize() }
fonts = "face:%(font)s,size:%(size)d" % faces
self.text.StyleSetFont( 0, font = self.font )
self.text.AppendText( self.main.txt )
self.text.EmptyUndoBuffer()
self.text.SetLexer( wx.stc.STC_LEX_ASM )
self.text.SetKeyWords( 0, "ld ldn a brx brn st stn")
self.text.StyleSetSpec( wx.stc.STC_P_WORD, "fore:#000000,normal,"+fonts )
self.text.StyleSetSpec( wx.stc.STC_ASM_COMMENT, "fore:#330000,normal,"+fonts )
self.text.StyleSetSpec( wx.stc.STC_ASM_NUMBER, "fore:#000000,normal,"+fonts )
self.text.StyleSetSpec( wx.stc.STC_ASM_OPERATOR, "fore:#0000FF,bold,"+fonts )
self.text.StyleSetSpec( wx.stc.STC_ASM_CPUINSTRUCTION, "fore:#0000FF,bold,"+fonts )
self.text.StyleSetSpec( wx.stc.STC_ASM_IDENTIFIER, "fore:#227722,normal,"+fonts )
self.text.SetMarginType(1, wx.stc.STC_MARGIN_NUMBER)
self.text.SetMarginMask(1, 0)
self.text.SetMarginWidth(1, 25)
self.text.SetMarginLeft( 10 )
self.text.StyleSetSpec( wx.stc.STC_STYLE_LINENUMBER, "fore:#333333,normal,"+fonts )
#ladder = read_ladder( sys.argv[1] )
#
#rung = ladder["rung 0"]
#
#
#pprint.pprint( stack_branches( rung ) )
txt = read_file( sys.argv[1] )
ladder = read_ladder( txt )
#pprint.pprint( stack_branches( ladder["rung 2"] ) )
#tmp = stack_branches( ladder["rung 2"] )
#sys.exit(1)
app = wx.App()
main = MainWindow( None, title = "Ladder", ladder = ladder, txt = txt, filename = sys.argv[1])
EditorWindow( main, title = "List", main = main )
app.MainLoop()
|
|
# Copyright 2017 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementation of gRPC Python interceptors."""
import collections
import sys
import grpc
class _ServicePipeline(object):
def __init__(self, interceptors):
self.interceptors = tuple(interceptors)
def _continuation(self, thunk, index):
return lambda context: self._intercept_at(thunk, index, context)
def _intercept_at(self, thunk, index, context):
if index < len(self.interceptors):
interceptor = self.interceptors[index]
thunk = self._continuation(thunk, index + 1)
return interceptor.intercept_service(thunk, context)
else:
return thunk(context)
def execute(self, thunk, context):
return self._intercept_at(thunk, 0, context)
def service_pipeline(interceptors):
return _ServicePipeline(interceptors) if interceptors else None
class _ClientCallDetails(
collections.namedtuple('_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials',
'wait_for_ready', 'compression')),
grpc.ClientCallDetails):
pass
def _unwrap_client_call_details(call_details, default_details):
try:
method = call_details.method
except AttributeError:
method = default_details.method
try:
timeout = call_details.timeout
except AttributeError:
timeout = default_details.timeout
try:
metadata = call_details.metadata
except AttributeError:
metadata = default_details.metadata
try:
credentials = call_details.credentials
except AttributeError:
credentials = default_details.credentials
try:
wait_for_ready = call_details.wait_for_ready
except AttributeError:
wait_for_ready = default_details.wait_for_ready
try:
compression = call_details.compression
except AttributeError:
compression = default_details.compression
return method, timeout, metadata, credentials, wait_for_ready, compression
class _FailureOutcome(grpc.RpcError, grpc.Future, grpc.Call): # pylint: disable=too-many-ancestors
def __init__(self, exception, traceback):
super(_FailureOutcome, self).__init__()
self._exception = exception
self._traceback = traceback
def initial_metadata(self):
return None
def trailing_metadata(self):
return None
def code(self):
return grpc.StatusCode.INTERNAL
def details(self):
return 'Exception raised while intercepting the RPC'
def cancel(self):
return False
def cancelled(self):
return False
def is_active(self):
return False
def time_remaining(self):
return None
def running(self):
return False
def done(self):
return True
def result(self, ignored_timeout=None):
raise self._exception
def exception(self, ignored_timeout=None):
return self._exception
def traceback(self, ignored_timeout=None):
return self._traceback
def add_callback(self, unused_callback):
return False
def add_done_callback(self, fn):
fn(self)
def __iter__(self):
return self
def __next__(self):
raise self._exception
def next(self):
return self.__next__()
class _UnaryOutcome(grpc.Call, grpc.Future):
def __init__(self, response, call):
self._response = response
self._call = call
def initial_metadata(self):
return self._call.initial_metadata()
def trailing_metadata(self):
return self._call.trailing_metadata()
def code(self):
return self._call.code()
def details(self):
return self._call.details()
def is_active(self):
return self._call.is_active()
def time_remaining(self):
return self._call.time_remaining()
def cancel(self):
return self._call.cancel()
def add_callback(self, callback):
return self._call.add_callback(callback)
def cancelled(self):
return False
def running(self):
return False
def done(self):
return True
def result(self, ignored_timeout=None):
return self._response
def exception(self, ignored_timeout=None):
return None
def traceback(self, ignored_timeout=None):
return None
def add_done_callback(self, fn):
fn(self)
class _UnaryUnaryMultiCallable(grpc.UnaryUnaryMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
response, ignored_call = self._with_call(
request,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
return response
def _with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
return _UnaryOutcome(response, call)
except grpc.RpcError as rpc_error:
return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
call = self._interceptor.intercept_unary_unary(
continuation, client_call_details, request)
return call.result(), call
def with_call(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
return self._with_call(
request,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
def future(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method).future(
request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_unary_unary(
continuation, client_call_details, request)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _UnaryStreamMultiCallable(grpc.UnaryStreamMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method)(
request,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_unary_stream(
continuation, client_call_details, request)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _StreamUnaryMultiCallable(grpc.StreamUnaryMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
response, ignored_call = self._with_call(
request_iterator,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
return response
def _with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
try:
response, call = self._thunk(new_method).with_call(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
return _UnaryOutcome(response, call)
except grpc.RpcError as rpc_error:
return rpc_error
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
call = self._interceptor.intercept_stream_unary(
continuation, client_call_details, request_iterator)
return call.result(), call
def with_call(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
return self._with_call(
request_iterator,
timeout=timeout,
metadata=metadata,
credentials=credentials,
wait_for_ready=wait_for_ready,
compression=compression)
def future(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method).future(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_stream_unary(
continuation, client_call_details, request_iterator)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _StreamStreamMultiCallable(grpc.StreamStreamMultiCallable):
def __init__(self, thunk, method, interceptor):
self._thunk = thunk
self._method = method
self._interceptor = interceptor
def __call__(self,
request_iterator,
timeout=None,
metadata=None,
credentials=None,
wait_for_ready=None,
compression=None):
client_call_details = _ClientCallDetails(self._method, timeout,
metadata, credentials,
wait_for_ready, compression)
def continuation(new_details, request_iterator):
(new_method, new_timeout, new_metadata, new_credentials,
new_wait_for_ready,
new_compression) = (_unwrap_client_call_details(
new_details, client_call_details))
return self._thunk(new_method)(
request_iterator,
timeout=new_timeout,
metadata=new_metadata,
credentials=new_credentials,
wait_for_ready=new_wait_for_ready,
compression=new_compression)
try:
return self._interceptor.intercept_stream_stream(
continuation, client_call_details, request_iterator)
except Exception as exception: # pylint:disable=broad-except
return _FailureOutcome(exception, sys.exc_info()[2])
class _Channel(grpc.Channel):
def __init__(self, channel, interceptor):
self._channel = channel
self._interceptor = interceptor
def subscribe(self, callback, try_to_connect=False):
self._channel.subscribe(callback, try_to_connect=try_to_connect)
def unsubscribe(self, callback):
self._channel.unsubscribe(callback)
def unary_unary(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.unary_unary(m, request_serializer, response_deserializer)
if isinstance(self._interceptor, grpc.UnaryUnaryClientInterceptor):
return _UnaryUnaryMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def unary_stream(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.unary_stream(m, request_serializer, response_deserializer)
if isinstance(self._interceptor, grpc.UnaryStreamClientInterceptor):
return _UnaryStreamMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def stream_unary(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.stream_unary(m, request_serializer, response_deserializer)
if isinstance(self._interceptor, grpc.StreamUnaryClientInterceptor):
return _StreamUnaryMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def stream_stream(self,
method,
request_serializer=None,
response_deserializer=None):
thunk = lambda m: self._channel.stream_stream(m, request_serializer, response_deserializer)
if isinstance(self._interceptor, grpc.StreamStreamClientInterceptor):
return _StreamStreamMultiCallable(thunk, method, self._interceptor)
else:
return thunk(method)
def _close(self):
self._channel.close()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self._close()
return False
def close(self):
self._channel.close()
def intercept_channel(channel, *interceptors):
for interceptor in reversed(list(interceptors)):
if not isinstance(interceptor, grpc.UnaryUnaryClientInterceptor) and \
not isinstance(interceptor, grpc.UnaryStreamClientInterceptor) and \
not isinstance(interceptor, grpc.StreamUnaryClientInterceptor) and \
not isinstance(interceptor, grpc.StreamStreamClientInterceptor):
raise TypeError('interceptor must be '
'grpc.UnaryUnaryClientInterceptor or '
'grpc.UnaryStreamClientInterceptor or '
'grpc.StreamUnaryClientInterceptor or '
'grpc.StreamStreamClientInterceptor.')
channel = _Channel(channel, interceptor)
return channel
|
|
# coding=utf-8
# Copyright 2018 The Conversation-AI.github.io Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The Model Trainer class.
This provides an abstraction of Keras and TF.Estimator, and is intended for use
in text classification models (although it may generalize to other kinds of
problems).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import os.path
import six
import tensorflow as tf
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.estimator import estimator as estimator_lib
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export.export_output import PredictOutput
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.training import optimizer as optimizer_lib
from tensorflow.python.lib.io import file_io
from tf_trainer.common import base_model
from tf_trainer.common import dataset_input as ds
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string('model_dir', None,
"Directory for the Estimator's model directory.")
tf.app.flags.DEFINE_string('warm_start_from', None,
'Existing checkpoint from which to start training.')
tf.app.flags.DEFINE_bool('enable_profiling', False,
'Enable profiler hook in estimator.')
tf.app.flags.DEFINE_integer(
'n_export', -1, 'Number of models to export. '
'If =-1, only the best checkpoint (wrt the specified eval metric) is exported. '
'If =1, only the last checkpoint is exported. '
'If >1, we export `n_export` evenly-spaced checkpoints.')
tf.app.flags.DEFINE_string('key_name', 'comment_key',
'Name of a pass-thru integer id for batch scoring.')
tf.app.flags.DEFINE_integer('train_steps', 100000,
'The number of steps to train for.')
tf.app.flags.DEFINE_integer('eval_period', 1000,
'The number of steps per eval period.')
tf.app.flags.DEFINE_integer('eval_steps', None,
'Number of examples to eval for, default all.')
tf.app.flags.mark_flag_as_required('model_dir')
# Copied from:
# https://stackoverflow.com/questions/49846207/tensorflow-estimator-warm-start-from-and-model-dir
class InitHook(tf.train.SessionRunHook):
"""Initializes model from a checkpoint_path
Args:
checkpoint_dir: full path to dir containing the checkpoint
"""
def __init__(self, checkpoint_dir):
self.model_path = checkpoint_dir
self.initialized = False
def begin(self):
"""
Restore parameters if a pre-trained model is available and
we haven't trained previously.
"""
if not self.initialized:
#checkpoint = tf.train.latest_checkpoint(self.model_path)
all_checkpoints = file_io.get_matching_files(os.path.join(
self.model_path, 'model.ckpt-*.index'))
if not all_checkpoints:
raise ValueError('No checkpoint files found matching %s.' % (
self.model_path + '*'))
all_checkpoints = [x.replace('.index', '') for x in all_checkpoints]
all_checkpoints = sorted(all_checkpoints, key=lambda x: int(x.split('-')[-1]))
checkpoint = all_checkpoints[-1]
if checkpoint is None:
logging.info('No pre-trained model is available at %s, '
'training from scratch.' % self.model_path)
else:
logging.info('Pre-trained model {0} found in {1} - warmstarting.'.format(
checkpoint, self.model_path))
tf.train.warm_start(checkpoint)
self.initialized = True
# This function extends tf.contrib.estimator.forward_features.
# As the binary_head has a ClassificationOutput for serving_default,
# the check at the end of 'new_model_fn' fails in the initial fn.
def forward_features(estimator, keys, sparse_default_values=None):
"""Forward features to predictions dictionary.
In some cases, the user wants to see some of the features in the estimator's
prediction output. As an example, consider a batch prediction service: the
service simply runs inference on the user's graph and returns the results.
Keys are essential because there is no order guarantee on the outputs, so they
need to be rejoined to the inputs via keys or by transclusion of the inputs in
the outputs.
Example:
```python
def input_fn():
features, labels = ...
features['unique_example_id'] = ...
return features, labels
estimator = tf.estimator.LinearClassifier(...)
estimator = tf.contrib.estimator.forward_features(
estimator, 'unique_example_id')
estimator.train(...)
assert 'unique_example_id' in estimator.predict(...)
```
Args:
estimator: A `tf.estimator.Estimator` object.
keys: A `string` or a `list`/`tuple` of `string`. If `None`, all features
are forwarded to predictions.
sparse_default_values: A dict of `str` keys mapping the name of the sparse
features to be converted to dense, to the default value to use. Only
sparse features indicated in the dictionary are converted to dense and the
provided default value is used.
Returns:
A new `tf.estimator.Estimator` which forwards features to predictions.
Raises:
ValueError:
* if `keys` is already part of `predictions`. We don't allow
override.
* if `keys` does not exist in `features`.
TypeError: if `keys` type is not one of `string` or list/tuple of `string`.
"""
def verify_key_types(keys): # pylint: disable=missing-docstring
if keys is None:
return keys
if isinstance(keys, six.string_types):
return [keys]
if not isinstance(keys, (list, tuple)):
raise TypeError('keys should be either a string or a list of strings. '
'Given: {}'.format(type(keys)))
for key in keys:
if not isinstance(key, six.string_types):
raise TypeError('All items in the given keys list should be a string. '
'There exists an item with type: {}'.format(type(key)))
return keys
def get_keys(features):
if keys is None:
return features.keys()
return keys
def verify_keys_and_predictions(features, predictions):
if not isinstance(predictions, dict):
raise ValueError(
'Predictions should be a dict to be able to forward features. '
'Given: {}'.format(type(predictions)))
for key in get_keys(features):
if key not in features:
raise ValueError(
'keys should exist in features. Key "{}" is not in the features '
'dict. The features dict has the following keys: {}. Please check '
'the arguments of forward_features.'.format(key, features.keys()))
if key in predictions:
raise ValueError(
'Cannot forward feature key ({}), since it already exists in '
'predictions. Existing prediction keys: {}. Please check the '
'arguments of forward_features.'.format(key, predictions.keys()))
keys = verify_key_types(keys)
def new_model_fn(features, labels, mode, config): # pylint: disable=missing-docstring
spec = estimator.model_fn(features, labels, mode, config)
predictions = spec.predictions
if predictions is None:
return spec
verify_keys_and_predictions(features, predictions)
for key in get_keys(features):
feature = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
features[key])
if sparse_default_values and (key in sparse_default_values):
if not isinstance(feature, sparse_tensor_lib.SparseTensor):
raise ValueError(
'Feature ({}) is expected to be a `SparseTensor`.'.format(key))
feature = sparse_ops.sparse_tensor_to_dense(
feature, default_value=sparse_default_values[key])
if not isinstance(feature, ops.Tensor):
raise ValueError(
'Feature ({}) should be a Tensor. Please use the `keys` '
'argument of forward_features to filter unwanted features, or '
'add the key to the `sparse_default_values` argument. '
'Type of features[{}] is {}.'.format(key, key, type(feature)))
predictions[key] = feature
spec = spec._replace(predictions=predictions)
if spec.export_outputs: # CHANGES HERE
outputs = spec.export_outputs['predict'].outputs
outputs[key] = spec.predictions[key]
spec.export_outputs['predict'] = tf.estimator.export.PredictOutput(
outputs)
spec.export_outputs[
'serving_default'] = tf.estimator.export.PredictOutput(outputs)
return spec
return estimator_lib.Estimator(
model_fn=new_model_fn,
model_dir=estimator.model_dir,
config=estimator.config)
class ModelTrainer(object):
"""Model Trainer."""
def __init__(self, dataset: ds.DatasetInput,
model: base_model.BaseModel,
warm_start_from: str = None) -> None:
self._dataset = dataset
self._model = model
self._warm_start_from = warm_start_from
self._estimator = model.estimator(self._model_dir())
def train_with_eval(self):
"""Train with periodic evaluation.
"""
training_hooks = None
if FLAGS.enable_profiling:
training_hooks = [
tf.train.ProfilerHook(
save_steps=10,
output_dir=os.path.join(self._model_dir(), 'profiler')),
]
if self._warm_start_from:
init_hook = InitHook(checkpoint_dir=self._warm_start_from)
if training_hooks:
training_hooks.append(init_hook)
else:
training_hooks = [init_hook]
train_spec = tf.estimator.TrainSpec(
input_fn=self._dataset.train_input_fn,
max_steps=FLAGS.train_steps,
hooks=training_hooks)
eval_spec = tf.estimator.EvalSpec(
input_fn=self._dataset.validate_input_fn,
steps=FLAGS.eval_steps,
throttle_secs=1)
self._estimator._config = self._estimator.config.replace(
save_checkpoints_steps=FLAGS.eval_period)
if FLAGS.n_export > 1 or FLAGS.n_export == -1:
self._estimator._config = self._estimator.config.replace(
keep_checkpoint_max=None)
tf.estimator.train_and_evaluate(self._estimator, train_spec, eval_spec)
def predict_on_dev(self, predict_keys=None):
checkpoints, _ = self._get_list_checkpoint(1, self._model_dir(),
None, None)
return self._estimator.predict(self._dataset.validate_input_fn,
predict_keys=predict_keys,
checkpoint_path=checkpoints[0])
def eval_dir(self):
return self._estimator.eval_dir()
def _model_dir(self):
"""Get Model Directory.
Used to scope logs to a given trial (when hyper param tuning) so that they
don't run over each other. When running locally it will just use the passed
in model_dir.
"""
return os.path.join(
FLAGS.model_dir,
json.loads(os.environ.get('TF_CONFIG', '{}')).get('task', {}).get(
'trial', ''))
def _add_estimator_key(self, estimator, example_key_name):
"""Adds a forward key to the model_fn of an estimator."""
estimator = forward_features(estimator, example_key_name)
return estimator
def _get_best_step_from_event_file(self,
event_file,
metrics_key,
is_first_metric_better_fn):
"""Find, in `event_file`, the step corresponding to the best metric.
Args:
event_file: The event file where to find the metrics.
metrics_key: The metric by which to determine the best checkpoint to save.
is_first_metric_better_fn: Comparison function to find best metric. Takes
in as arguments two numbers, returns true if first is better than
second. Default function says larger is better. Default value works for
AUC: higher is better.
Returns:
Best step (int).
"""
if not metrics_key:
return None
best_metric = None
best_step = None
for e in tf.train.summary_iterator(event_file):
for v in e.summary.value:
if v.tag == metrics_key:
metric = v.simple_value
if not best_step or is_first_metric_better_fn(metric, best_metric):
best_metric = metric
best_step = e.step
return best_step
def _get_best_checkpoint(self,
checkpoints,
metrics_key,
is_first_metric_better_fn):
"""Find the best checkpoint, according to `metrics_key`.
Args:
checkpoints: List of model checkpoints.
metrics_key: The metric by which to determine the best checkpoint to save.
is_first_metric_better_fn: Comparison function to find best metric. Takes
in as arguments two numbers, returns true if first is better than
second. Default function says larger is better. Default value works for
AUC: higher is better.
Returns:
Best checkpoint path.
"""
eval_event_dir = self._estimator.eval_dir()
event_files = file_io.list_directory(eval_event_dir)
if not event_files:
raise ValueError('No event files found in directory %s.' % eval_event_dir)
if len(event_files) > 1:
print('Multiple event files found in dir %s. Using last one.' % eval_event_dir)
event_file = os.path.join(eval_event_dir, event_files[-1])
# Use the best step to find the best checkpoint.
best_step = self._get_best_step_from_event_file(event_file, metrics_key,
is_first_metric_better_fn)
# If we couldn't find metrics_key in the event file, try again using loss.
if best_step is None:
print("Metrics key %s not found in metrics, using 'loss' as metric key." %
metrics_key)
metrics_key = "loss"
# Want the checkpoint with the lowest loss
is_first_metric_better_fn = lambda x, y: x < y
best_step = self._get_best_step_from_event_file(event_file, metrics_key,
is_first_metric_better_fn)
if best_step is None:
raise ValueError("Couldn't find 'loss' metric in event file %s." % event_file)
best_checkpoint_path = None
for checkpoint_path in checkpoints:
version = int(checkpoint_path.split('-')[-1])
if version == best_step:
best_checkpoint_path = checkpoint_path
if not best_checkpoint_path:
raise ValueError("Couldn't find checkpoint for best_step = %d." % best_step)
return best_checkpoint_path
def _get_list_checkpoint(self,
n_export,
model_dir,
metrics_key,
is_first_metric_better_fn):
"""Get the checkpoints that we want to export, as well as the ones to clean up.
Args:
n_export: Number of models to export.
model_dir: Directory containing the checkpoints.
metrics_key: The metric by which to determine the best checkpoint to save.
is_first_metric_better_fn: Comparison function to find best metric. Takes
in as arguments two numbers, returns true if first is better than
second. Default function says larger is better. Default value works for
AUC: higher is better.
Returns:
Tuple of:
List of checkpoint paths to export,
Set of checkpoint paths to delete.
If n_export==1, we take only the last checkpoint.
If n_export==-1, we take the best checkpoint, according to `metrics_key` and
`is_first_metric_better_fn`. The remaining checkpoints are deleted.
Otherwise, we consider the list of steps for which we have a checkpoint,
then choose n_export checkpoints whose steps are as equidistant as possible.
"""
all_checkpoints = file_io.get_matching_files(
os.path.join(model_dir, 'model.ckpt-*.index'))
if not all_checkpoints:
raise ValueError('No checkpoint files found matching model.ckpt-*.index.')
all_checkpoints = [x.replace('.index', '') for x in all_checkpoints]
all_checkpoints = sorted(all_checkpoints, key=lambda x: int(x.split('-')[-1]))
# Keep track of the checkpoints to export, and the ones to delete.
checkpoints_to_export = None
checkpoints_to_delete = None
if n_export == 1:
checkpoints_to_export = [all_checkpoints[-1]]
elif n_export == -1:
checkpoints_to_export = [self._get_best_checkpoint(all_checkpoints, metrics_key,
is_first_metric_better_fn)]
elif n_export > 1:
# We want to cover a span of (len(all_checkpoints) - 1) steps (for 3 checkpoints
# the span is 2) using (n_export - 1) intervals, because the last checkpoint is
# always appended separately.
step = float(len(all_checkpoints) - 1) / (n_export - 1)
if step <= 1: # Fewer checkpoints available than the desired number.
return all_checkpoints, None
checkpoints_to_export = [
all_checkpoints[int(i * step)] for i in range(n_export - 1)
]
checkpoints_to_export.append(all_checkpoints[-1])
if checkpoints_to_export:
checkpoints_to_delete = set(all_checkpoints) - set(checkpoints_to_export)
return checkpoints_to_export, checkpoints_to_delete
def export(self,
serving_input_fn,
example_key_name=None,
metrics_key=None,
is_first_metric_better_fn=lambda x, y: x > y,
delete_unexported_checkpoints=True):
"""Export model as a .pb.
Args:
serving_input_fn: An input function for inference graph.
example_key_name: Name of the example_key field (string).
If None, no example_key will be used.
metrics_key: The metric by which to determine the best checkpoint to save.
is_first_metric_better_fn: Comparison function to find best metric. Takes
in as arguments two numbers, returns true if first is better than
second. Default function says larger is better. Default value works for
AUC: higher is better.
delete_unexported_checkpoints: Boolean flag indicating whether or not to delete
the checkpoints that aren't exported. If False then all model checkpoints are
retained.
NOTE: if using a different metrics_key than AUC, make sure `is_first_metric_better_fn`
is updated accordingly.
Example keys are useful when doing batch predictions. Typically,
the predictions are done by a cluster of machines and the order of
the results is random. Here, we add a forward feature in the inference graph
(https://www.tensorflow.org/api_docs/python/tf/contrib/estimator/forward_features)
which will be used as an example unique identifier. In inference, the input
example includes an example_key field that is passed along by the estimator
and returned in the predictions.
"""
if FLAGS.n_export == -1:
if not is_first_metric_better_fn:
raise ValueError('Must provide valid `is_first_metric_better_fn` '
'when exporting best checkpoint.')
if not metrics_key:
print('No value provided for `metrics_key`. Using loss.')
metrics_key = 'loss'
is_first_metric_better_fn = lambda x, y: x < y
estimator = self._estimator
if example_key_name:
estimator = self._add_estimator_key(self._estimator, example_key_name)
checkpoints_to_export, checkpoints_to_delete = self._get_list_checkpoint(
FLAGS.n_export, self._model_dir(), metrics_key, is_first_metric_better_fn)
# Delete the checkpoints we don't want.
if checkpoints_to_delete and delete_unexported_checkpoints:
for ckpt in checkpoints_to_delete:
tf.train.remove_checkpoint(ckpt)
# Export the desired checkpoints.
if checkpoints_to_export:
for checkpoint_path in checkpoints_to_export:
version = checkpoint_path.split('-')[-1]
estimator.export_savedmodel(
export_dir_base=os.path.join(self._model_dir(), version),
serving_input_receiver_fn=serving_input_fn,
checkpoint_path=checkpoint_path)
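# A hedged usage sketch of the ModelTrainer class above (dataset, model and
# serving_input_fn are assumed to be supplied by the calling script; the names
# are illustrative, not part of this module):
#
#     trainer = ModelTrainer(dataset, model, warm_start_from=FLAGS.warm_start_from)
#     trainer.train_with_eval()
#     trainer.export(serving_input_fn, example_key_name=FLAGS.key_name)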
|
|
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Load application resources from a known path.
Loading resources by specifying relative paths to filenames is often
problematic in Python, as the working directory is not necessarily the same
directory as the application's script files.
This module allows applications to specify a search path for resources.
Relative paths are taken to be relative to the application's __main__ module.
ZIP files can appear on the path; they will be searched inside. The resource
module also behaves as expected when applications are bundled using py2exe or
py2app.
As well as providing file references (with the `file` function), the resource
module also contains convenience functions for loading images, textures,
fonts, media and documents.
3rd party modules or packages not bound to a specific application should
construct their own `Loader` instance and override the path to use the
resources in the module's directory.
Path format
^^^^^^^^^^^
The resource path `path` (see also `Loader.__init__` and `Loader.path`)
is a list of locations to search for resources. Locations are searched in the
order given in the path. If a location is not valid (for example, if the
directory does not exist), it is skipped.
Locations in the path beginning with an at sign (''@'' symbol) specify
Python packages. Other locations specify a ZIP archive or directory on the
filesystem. Locations that are not absolute are assumed to be relative to the
script home. Some examples::
# Search just the `res` directory, assumed to be located alongside the
# main script file.
path = ['res']
# Search the directory containing the module `levels.level1`, followed
# by the `res/images` directory.
path = ['@levels.level1', 'res/images']
Paths are always case-sensitive and forward slashes are always used as path
separators, even in cases when the filesystem or platform does not do this.
This avoids a common programmer error when porting applications between
platforms.
The default path is ``['.']``. If you modify the path, you must call
`reindex`.
:since: pyglet 1.1
'''
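# A short usage sketch of the path conventions documented above (the 'res'
# directory and 'logo.png' file are illustrative assumptions):
#
#     import pyglet
#     pyglet.resource.path = ['res', '@levels.level1']
#     pyglet.resource.reindex()
#     logo = pyglet.resource.image('logo.png')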
__docformat__ = 'restructuredtext'
__version__ = '$Id: $'
import os
import weakref
import sys
import zipfile
import pyglet
from pyglet.compat import BytesIO
class ResourceNotFoundException(Exception):
'''The named resource was not found on the search path.'''
def __init__(self, name):
message = ('Resource "%s" was not found on the path. '
'Ensure that the filename has the correct capitalisation.') % name
Exception.__init__(self, message)
def get_script_home():
'''Get the directory containing the program entry module.
For ordinary Python scripts, this is the directory containing the
``__main__`` module. For executables created with py2exe the result is
the directory containing the running executable file. For OS X bundles
created using Py2App the result is the Resources directory within the
running bundle.
If none of the above cases apply and the file for ``__main__`` cannot
be determined, the working directory is returned.
When the script is being run by a Python profiler, this function
may return the directory where the profiler is running instead of
the directory of the real script. To work around this behaviour, the
full path to the real script can be specified in `pyglet.resource.path`.
:rtype: str
'''
frozen = getattr(sys, 'frozen', None)
if frozen in ('windows_exe', 'console_exe'):
return os.path.dirname(sys.executable)
elif frozen == 'macosx_app':
# py2app
return os.environ['RESOURCEPATH']
else:
main = sys.modules['__main__']
if hasattr(main, '__file__'):
return os.path.dirname(os.path.abspath(main.__file__))
else:
# cx_Freeze
return os.path.dirname(sys.executable)
# Probably interactive
return ''
def get_settings_path(name):
'''Get a directory to save user preferences.
Different platforms have different conventions for where to save user
preferences, saved games, and settings. This function implements those
conventions. Note that the returned path may not exist: applications
should use ``os.makedirs`` to construct it if desired.
On Linux, a directory `name` in the user's configuration directory is
returned (usually under ``~/.config``).
On Windows (including under Cygwin) the `name` directory in the user's
``Application Settings`` directory is returned.
On Mac OS X the `name` directory under ``~/Library/Application Support``
is returned.
:Parameters:
`name` : str
The name of the application.
:rtype: str
'''
if pyglet.compat_platform in ('cygwin', 'win32'):
if 'APPDATA' in os.environ:
return os.path.join(os.environ['APPDATA'], name)
else:
return os.path.expanduser('~/%s' % name)
elif pyglet.compat_platform == 'darwin':
return os.path.expanduser('~/Library/Application Support/%s' % name)
elif pyglet.compat_platform.startswith('linux'):
if 'XDG_CONFIG_HOME' in os.environ:
return os.path.join(os.environ['XDG_CONFIG_HOME'], name)
else:
return os.path.expanduser('~/.config/%s' % name)
else:
return os.path.expanduser('~/.%s' % name)
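# Hedged usage sketch (not part of the original module): the docstring above
# notes that the returned settings directory may not exist and that callers
# should create it with ``os.makedirs``.  The application name used here is
# an arbitrary example.
def _example_settings_dir(app_name='MyApplication'):
    '''Illustrative only: return the per-user settings dir, creating it.'''
    settings_dir = get_settings_path(app_name)
    if not os.path.isdir(settings_dir):
        os.makedirs(settings_dir)
    return settings_dir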
class Location(object):
'''Abstract resource location.
Given a location, a file can be loaded from that location with the `open`
method. This provides a convenient way to specify a path to load files
from, and not necessarily have that path reside on the filesystem.
'''
def open(self, filename, mode='rb'):
'''Open a file at this location.
:Parameters:
`filename` : str
The filename to open. Absolute paths are not supported.
Relative paths are not supported by most locations (you
should specify only a filename with no path component).
`mode` : str
The file mode to open with. Only files opened on the
filesystem make use of this parameter; others ignore it.
:rtype: file object
'''
raise NotImplementedError('abstract')
class FileLocation(Location):
'''Location on the filesystem.
'''
def __init__(self, path):
'''Create a location given a relative or absolute path.
:Parameters:
`path` : str
Path on the filesystem.
'''
self.path = path
def open(self, filename, mode='rb'):
return open(os.path.join(self.path, filename), mode)
class ZIPLocation(Location):
'''Location within a ZIP file.
'''
def __init__(self, zip, dir):
'''Create a location given an open ZIP file and a path within that
file.
:Parameters:
`zip` : ``zipfile.ZipFile``
An open ZIP file from the ``zipfile`` module.
`dir` : str
A path within that ZIP file. Can be empty to specify files at
the top level of the ZIP file.
'''
self.zip = zip
self.dir = dir
def open(self, filename, mode='rb'):
if self.dir:
path = self.dir + '/' + filename
else:
path = filename
text = self.zip.read(path)
return BytesIO(text)
class URLLocation(Location):
'''Location on the network.
    This class uses the ``urllib.parse`` and ``urllib.request`` modules to open files on
the network given a URL.
'''
def __init__(self, base_url):
'''Create a location given a base URL.
:Parameters:
`base_url` : str
URL string to prepend to filenames.
'''
self.base = base_url
def open(self, filename, mode='rb'):
        import urllib.parse
        import urllib.request
url = urllib.parse.urljoin(self.base, filename)
return urllib.request.urlopen(url)
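# Hedged sketch (not part of the original module): the Location classes above
# share the single ``open`` interface, so code that reads resources does not
# need to know whether a file lives in a directory, inside a ZIP archive or
# behind a URL.  The locations and file names shown below are examples only.
def _example_read_from_location(location, filename):
    '''Illustrative only: return the raw bytes of `filename` from any Location.'''
    f = location.open(filename)
    try:
        return f.read()
    finally:
        f.close()
# e.g. _example_read_from_location(FileLocation('res'), 'logo.png')
# or   _example_read_from_location(ZIPLocation(zipfile.ZipFile('assets.zip'), ''), 'logo.png')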
class Loader(object):
'''Load program resource files from disk.
The loader contains a search path which can include filesystem
directories, ZIP archives and Python packages.
:Ivariables:
`path` : list of str
List of search locations. After modifying the path you must
call the `reindex` method.
`script_home` : str
Base resource location, defaulting to the location of the
application script.
'''
def __init__(self, path=None, script_home=None):
'''Create a loader for the given path.
If no path is specified it defaults to ``['.']``; that is, just the
program directory.
See the module documentation for details on the path format.
:Parameters:
`path` : list of str
List of locations to search for resources.
`script_home` : str
Base location of relative files. Defaults to the result of
`get_script_home`.
'''
if path is None:
path = ['.']
        if isinstance(path, str):
path = [path]
self.path = list(path)
if script_home is None:
script_home = get_script_home()
self._script_home = script_home
self._index = None
# Map bin size to list of atlases
self._texture_atlas_bins = {}
def _require_index(self):
if self._index is None:
self.reindex()
def reindex(self):
'''Refresh the file index.
You must call this method if `path` is changed or the filesystem
layout changes.
'''
# map name to image etc.
self._cached_textures = weakref.WeakValueDictionary()
self._cached_images = weakref.WeakValueDictionary()
self._cached_animations = weakref.WeakValueDictionary()
self._index = {}
for path in self.path:
if path.startswith('@'):
# Module
name = path[1:]
try:
module = __import__(name)
except:
continue
for component in name.split('.')[1:]:
module = getattr(module, component)
if hasattr(module, '__file__'):
path = os.path.dirname(module.__file__)
else:
path = '' # interactive
elif not os.path.isabs(path):
# Add script base unless absolute
assert '\\' not in path, \
'Backslashes not permitted in relative path'
path = os.path.join(self._script_home, path)
if os.path.isdir(path):
# Filesystem directory
path = path.rstrip(os.path.sep)
location = FileLocation(path)
for dirpath, dirnames, filenames in os.walk(path):
dirpath = dirpath[len(path) + 1:]
# Force forward slashes for index
if dirpath:
parts = [_f for _f in dirpath.split(os.sep) if _f]
dirpath = '/'.join(parts)
for filename in filenames:
if dirpath:
index_name = dirpath + '/' + filename
else:
index_name = filename
self._index_file(index_name, location)
else:
# Find path component that is the ZIP file.
dir = ''
old_path = None
while path and not os.path.isfile(path):
old_path = path
path, tail_dir = os.path.split(path)
if path == old_path:
break
dir = '/'.join((tail_dir, dir))
if path == old_path:
continue
dir = dir.rstrip('/')
# path is a ZIP file, dir resides within ZIP
if path and zipfile.is_zipfile(path):
zip = zipfile.ZipFile(path, 'r')
location = ZIPLocation(zip, dir)
for zip_name in zip.namelist():
#zip_name_dir, zip_name = os.path.split(zip_name)
#assert '\\' not in name_dir
#assert not name_dir.endswith('/')
if zip_name.startswith(dir):
if dir:
zip_name = zip_name[len(dir)+1:]
self._index_file(zip_name, location)
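    # Hedged example (comment only, not part of the original module): with
    # ``path = ['@levels.level1', 'res', 'assets.zip/images']`` the loop above
    # indexes, in order, the directory of the ``levels.level1`` package, the
    # ``res`` directory next to the script, and the ``images`` folder inside
    # ``assets.zip``.  The first location that provides a given name wins,
    # because ``_index_file`` only records names it has not seen yet.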
def _index_file(self, name, location):
if name not in self._index:
self._index[name] = location
def file(self, name, mode='rb'):
'''Load a resource.
:Parameters:
`name` : str
Filename of the resource to load.
`mode` : str
Combination of ``r``, ``w``, ``a``, ``b`` and ``t`` characters
with the meaning as for the builtin ``open`` function.
:rtype: file object
'''
self._require_index()
try:
location = self._index[name]
return location.open(name, mode)
except KeyError:
raise ResourceNotFoundException(name)
def location(self, name):
'''Get the location of a resource.
This method is useful for opening files referenced from a resource.
For example, an HTML file loaded as a resource might reference some
images. These images should be located relative to the HTML file, not
looked up individually in the loader's path.
:Parameters:
`name` : str
Filename of the resource to locate.
:rtype: `Location`
'''
self._require_index()
try:
return self._index[name]
except KeyError:
raise ResourceNotFoundException(name)
def add_font(self, name):
'''Add a font resource to the application.
Fonts not installed on the system must be added to pyglet before they
can be used with `font.load`. Although the font is added with
its filename using this function, it is loaded by specifying its
family name. For example::
resource.add_font('action_man.ttf')
action_man = font.load('Action Man')
:Parameters:
`name` : str
Filename of the font resource to add.
'''
self._require_index()
from pyglet import font
file = self.file(name)
font.add_file(file)
def _alloc_image(self, name, atlas=True):
file = self.file(name)
try:
img = pyglet.image.load(name, file=file)
finally:
file.close()
if not atlas:
return img.get_texture(True)
# find an atlas suitable for the image
bin = self._get_texture_atlas_bin(img.width, img.height)
if bin is None:
return img.get_texture(True)
return bin.add(img)
def _get_texture_atlas_bin(self, width, height):
'''A heuristic for determining the atlas bin to use for a given image
size. Returns None if the image should not be placed in an atlas (too
        big), otherwise the `TextureBin` to add it to.
'''
# Large images are not placed in an atlas
if width > 128 or height > 128:
return None
        # Group images with small height separately from those with larger
        # height (as the allocator can't stack within a single row).
bin_size = 1
if height > 32:
bin_size = 2
try:
bin = self._texture_atlas_bins[bin_size]
except KeyError:
bin = self._texture_atlas_bins[bin_size] = \
pyglet.image.atlas.TextureBin()
return bin
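    # Hedged worked example (comment only, not part of the original module):
    # with the heuristic above, a 64x24 sprite lands in the ``bin_size == 1``
    # TextureBin, a 100x100 tile in ``bin_size == 2``, and a 512x512 image is
    # too large for an atlas and gets its own texture.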
def image(self, name, flip_x=False, flip_y=False, rotate=0, atlas=True):
'''Load an image with optional transformation.
This is similar to `texture`, except the resulting image will be
packed into a `TextureBin` if it is an appropriate size for packing.
This is more efficient than loading images into separate textures.
:Parameters:
`name` : str
Filename of the image source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
`atlas` : bool
If True, the image will be loaded into an atlas managed by
pyglet. If atlas loading is not appropriate for specific
texturing reasons (e.g. border control is required) then set
this argument to False.
:rtype: `Texture`
:return: A complete texture if the image is large or not in an atlas,
otherwise a `TextureRegion` of a texture atlas.
'''
self._require_index()
if name in self._cached_images:
identity = self._cached_images[name]
else:
identity = self._cached_images[name] = self._alloc_image(name,
atlas=atlas)
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def animation(self, name, flip_x=False, flip_y=False, rotate=0):
'''Load an animation with optional transformation.
Animations loaded from the same source but with different
transformations will use the same textures.
:Parameters:
`name` : str
Filename of the animation source to load.
`flip_x` : bool
If True, the returned image will be flipped horizontally.
`flip_y` : bool
If True, the returned image will be flipped vertically.
`rotate` : int
The returned image will be rotated clockwise by the given
number of degrees (a multiple of 90).
:rtype: `Animation`
'''
self._require_index()
try:
identity = self._cached_animations[name]
except KeyError:
animation = pyglet.image.load_animation(name, self.file(name))
bin = self._get_texture_atlas_bin(animation.get_max_width(),
animation.get_max_height())
if bin:
animation.add_to_texture_bin(bin)
identity = self._cached_animations[name] = animation
if not rotate and not flip_x and not flip_y:
return identity
return identity.get_transform(flip_x, flip_y, rotate)
def get_cached_image_names(self):
'''Get a list of image filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
self._require_index()
return list(self._cached_images.keys())
def get_cached_animation_names(self):
'''Get a list of animation filenames that have been cached.
This is useful for debugging and profiling only.
:rtype: list
:return: List of str
'''
self._require_index()
return list(self._cached_animations.keys())
def get_texture_bins(self):
'''Get a list of texture bins in use.
This is useful for debugging and profiling only.
:rtype: list
:return: List of `TextureBin`
'''
self._require_index()
return list(self._texture_atlas_bins.values())
def media(self, name, streaming=True):
'''Load a sound or video resource.
The meaning of `streaming` is as for `media.load`. Compressed
sources cannot be streamed (that is, video and compressed audio
cannot be streamed from a ZIP archive).
:Parameters:
`name` : str
Filename of the media source to load.
`streaming` : bool
True if the source should be streamed from disk, False if
it should be entirely decoded into memory immediately.
:rtype: `media.Source`
'''
self._require_index()
from pyglet import media
try:
location = self._index[name]
if isinstance(location, FileLocation):
# Don't open the file if it's streamed from disk -- AVbin
# needs to do it.
path = os.path.join(location.path, name)
return media.load(path, streaming=streaming)
else:
file = location.open(name)
return media.load(name, file=file, streaming=streaming)
except KeyError:
raise ResourceNotFoundException(name)
def texture(self, name):
'''Load a texture.
The named image will be loaded as a single OpenGL texture. If the
dimensions of the image are not powers of 2 a `TextureRegion` will
be returned.
:Parameters:
`name` : str
Filename of the image resource to load.
:rtype: `Texture`
'''
self._require_index()
if name in self._cached_textures:
return self._cached_textures[name]
file = self.file(name)
texture = pyglet.image.load(name, file=file).get_texture()
self._cached_textures[name] = texture
return texture
def html(self, name):
'''Load an HTML document.
:Parameters:
`name` : str
Filename of the HTML resource to load.
:rtype: `FormattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.decode_html(file.read(), self.location(name))
def attributed(self, name):
'''Load an attributed text document.
See `pyglet.text.formats.attributed` for details on this format.
:Parameters:
`name` : str
Filename of the attribute text resource to load.
:rtype: `FormattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.load(name, file, 'text/vnd.pyglet-attributed')
def text(self, name):
'''Load a plain text document.
:Parameters:
`name` : str
Filename of the plain text resource to load.
:rtype: `UnformattedDocument`
'''
self._require_index()
file = self.file(name)
return pyglet.text.load(name, file, 'text/plain')
def get_cached_texture_names(self):
'''Get the names of textures currently cached.
:rtype: list of str
'''
self._require_index()
return list(self._cached_textures.keys())
#: Default resource search path.
#:
#: Locations in the search path are searched in order and are always
#: case-sensitive. After changing the path you must call `reindex`.
#:
#: See the module documentation for details on the path format.
#:
#: :type: list of str
path = []
class _DefaultLoader(Loader):
def _get_path(self):
return path
def _set_path(self, value):
global path
path = value
path = property(_get_path, _set_path)
_default_loader = _DefaultLoader()
reindex = _default_loader.reindex
file = _default_loader.file
location = _default_loader.location
add_font = _default_loader.add_font
image = _default_loader.image
animation = _default_loader.animation
get_cached_image_names = _default_loader.get_cached_image_names
get_cached_animation_names = _default_loader.get_cached_animation_names
get_texture_bins = _default_loader.get_texture_bins
media = _default_loader.media
texture = _default_loader.texture
html = _default_loader.html
attributed = _default_loader.attributed
text = _default_loader.text
get_cached_texture_names = _default_loader.get_cached_texture_names
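# Hedged usage sketch (not part of the original module): typical use of the
# module-level convenience API defined above; the path entries and file names
# are examples only.
def _example_default_loader_usage():
    '''Illustrative only: configure the search path and load some resources.'''
    global path
    path = ['res', '@levels.level1']
    reindex()                       # required after the path is changed
    logo = image('logo.png')        # small images are packed into an atlas
    click = media('click.wav', streaming=False)
    return logo, click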
|
|
"""Working day tools
"""
import warnings
from calendar import monthrange
from datetime import date, timedelta, datetime
from math import pi
import ephem
import pytz
from calverter import Calverter
from dateutil import easter
from lunardate import LunarDate
MON, TUE, WED, THU, FRI, SAT, SUN = range(7)
class Calendar(object):
FIXED_HOLIDAYS = ()
def __init__(self):
self._holidays = {}
def get_fixed_holidays(self, year):
"""Return the fixed days according to the FIXED_HOLIDAYS class property
"""
days = []
for month, day, label in self.FIXED_HOLIDAYS:
days.append((date(year, month, day), label))
return days
def get_variable_days(self, year):
return []
def get_calendar_holidays(self, year):
"""Get calendar holidays.
        If you want to override this, make sure it returns a list of
        (date, holiday_name) tuples."""
return self.get_fixed_holidays(year) + self.get_variable_days(year)
def holidays(self, year=None):
"""Computes holidays (non-working days) for a given year.
Return a 2-item tuple, composed of the date and a label."""
if not year:
year = date.today().year
if year in self._holidays:
return self._holidays[year]
# Here we process the holiday specific calendar
temp_calendar = tuple(self.get_calendar_holidays(year))
# it is sorted
self._holidays[year] = sorted(temp_calendar)
return self._holidays[year]
def get_holiday_label(self, day):
"""Return the label of the holiday, if the date is a holiday"""
# a little exception: chop the datetime type
if type(day) is datetime:
day = day.date()
        return {day: label for day, label in self.holidays(day.year)}.get(day)
def holidays_set(self, year=None):
"Return a quick date index (set)"
return set([day for day, label in self.holidays(year)])
def get_weekend_days(self):
"""Return a list (or a tuple) of weekdays that are *not* working days.
e.g: return (SAT, SUN,)
"""
raise NotImplementedError("Your Calendar class must implement the"
" `get_weekend_days` method")
def is_working_day(self, day,
extra_working_days=None, extra_holidays=None):
"""Return True if it's a working day.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
# a little exception: chop the datetime type
if type(day) is datetime:
day = day.date()
# Extra lists exceptions
if extra_working_days and day in extra_working_days:
return True
# Regular rules
if day.weekday() in self.get_weekend_days():
return False
return not self.is_holiday(day, extra_holidays=extra_holidays)
def is_holiday(self, day, extra_holidays=None):
"""Return True if it's an holiday.
In addition to the regular holidays, you can add exceptions.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
"""
# a little exception: chop the datetime type
if type(day) is datetime:
day = day.date()
if extra_holidays and day in extra_holidays:
return True
return day in self.holidays_set(day.year)
def add_working_days(self, day, delta,
extra_working_days=None, extra_holidays=None):
"""Add `delta` working days to the date.
        The ``delta`` parameter might be positive or negative. If it's
negative, you may want to use the ``sub_working_days()`` method with
a positive ``delta`` argument.
By providing ``extra_working_days``, you'll state that these dates
**are** working days.
By providing ``extra_holidays``, you'll state that these dates **are**
holidays, even if not in the regular calendar holidays (or weekends).
Please note that the ``extra_working_days`` list has priority over the
``extra_holidays`` list.
"""
days = 0
temp_day = day
day_added = 1 if delta >= 0 else -1
delta = abs(delta)
while days < delta:
temp_day = temp_day + timedelta(days=day_added)
if self.is_working_day(temp_day,
extra_working_days=extra_working_days,
extra_holidays=extra_holidays):
days += 1
return temp_day
def sub_working_days(self, day, delta,
extra_working_days=None, extra_holidays=None):
"""
        Subtract `delta` working days from the date.
This method is a shortcut / helper. Users may want to use either::
cal.add_working_days(my_date, -7)
cal.sub_working_days(my_date, 7)
The other parameters are to be used exactly as in the
``add_working_days`` method.
A negative ``delta`` argument will be converted into its absolute
value. Hence, the two following calls are equivalent::
cal.sub_working_days(my_date, -7)
cal.sub_working_days(my_date, 7)
"""
delta = abs(delta)
return self.add_working_days(
day, -delta, extra_working_days, extra_holidays)
def find_following_working_day(self, day):
"Looks for the following working day"
while day.weekday() in self.get_weekend_days():
day = day + timedelta(days=1)
return day
@staticmethod
def get_nth_weekday_in_month(year, month, weekday, n=1, start=None):
"""Get the nth weekday in a given month. e.g:
>>> # the 1st monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 7)
>>> # The 2nd monday in Jan 2013
>>> Calendar.get_nth_weekday_in_month(2013, 1, MON, 2)
datetime.date(2013, 1, 14)
"""
day = date(year, month, 1)
if start:
day = start
counter = 0
while True:
if day.month != month:
# Don't forget to break if "n" is too big
return None
if day.weekday() == weekday:
counter += 1
if counter == n:
break
day = day + timedelta(days=1)
return day
@staticmethod
def get_last_weekday_in_month(year, month, weekday):
"""Get the last weekday in a given month. e.g:
>>> # the last monday in Jan 2013
>>> Calendar.get_last_weekday_in_month(2013, 1, MON)
datetime.date(2013, 1, 28)
"""
day = date(year, month, monthrange(year, month)[1])
while True:
if day.weekday() == weekday:
break
day = day - timedelta(days=1)
return day
@staticmethod
def get_first_weekday_after(day, weekday):
"""Get the first weekday after a given day. If the day is the same
weekday, the same day will be returned.
>>> # the first monday after Apr 1 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 1), 0)
datetime.date(2015, 4, 6)
>>> # the first tuesday after Apr 14 2015
>>> Calendar.get_first_weekday_after(date(2015, 4, 14), 1)
datetime.date(2015, 4, 14)
"""
day_delta = (weekday - day.weekday()) % 7
day = day + timedelta(days=day_delta)
return day
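# Hedged sketch (not part of the original module): the smallest usable
# ``Calendar`` subclass only needs ``get_weekend_days`` and, optionally, some
# ``FIXED_HOLIDAYS``.  The holiday chosen below is an arbitrary example.
class _ExampleCalendar(Calendar):
    "Illustrative only: Saturday/Sunday week-end plus one fixed holiday."
    FIXED_HOLIDAYS = (
        (1, 1, "New year"),
    )
    def get_weekend_days(self):
        return (SAT, SUN)
# e.g.:
#   cal = _ExampleCalendar()
#   cal.is_working_day(date(2014, 1, 1))        # False: fixed holiday
#   cal.add_working_days(date(2014, 1, 3), 5)   # skips week-ends and holidays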
class ChristianMixin(Calendar):
EASTER_METHOD = None # to be assigned in the inherited mixin
include_epiphany = False
include_clean_monday = False
include_annunciation = False
include_ash_wednesday = False
include_palm_sunday = False
include_holy_thursday = False
include_good_friday = False
include_easter_monday = False
include_easter_saturday = False
include_easter_sunday = False
include_all_saints = False
include_immaculate_conception = False
include_christmas = True
include_christmas_eve = False
include_ascension = False
include_assumption = False
include_whit_sunday = False
whit_sunday_label = 'Whit Sunday'
include_whit_monday = False
whit_monday_label = 'Whit Monday'
include_corpus_christi = False
include_boxing_day = False
boxing_day_label = "Boxing Day"
def get_ash_wednesday(self, year):
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=46)
def get_palm_sunday(self, year):
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=7)
def get_holy_thursday(self, year):
"Return the date of the last thursday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=3)
def get_good_friday(self, year):
"Return the date of the last friday before easter"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=2)
def get_clean_monday(self, year):
"Return the clean monday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=48)
def get_easter_saturday(self, year):
"Return the Easter Saturday date"
sunday = self.get_easter_sunday(year)
return sunday - timedelta(days=1)
def get_easter_sunday(self, year):
"Return the date of the easter (sunday) -- following the easter method"
return easter.easter(year, self.EASTER_METHOD)
def get_easter_monday(self, year):
"Return the date of the monday after easter"
sunday = self.get_easter_sunday(year)
return sunday + timedelta(days=1)
def get_ascension_thursday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=39)
def get_whit_monday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=50)
def get_whit_sunday(self, year):
easter = self.get_easter_sunday(year)
return easter + timedelta(days=49)
def get_corpus_christi(self, year):
return self.get_easter_sunday(year) + timedelta(days=60)
def get_variable_days(self, year): # noqa
"Return the christian holidays list according to the mixin"
days = super(ChristianMixin, self).get_variable_days(year)
if self.include_epiphany:
days.append((date(year, 1, 6), "Epiphany"))
if self.include_clean_monday:
days.append((self.get_clean_monday(year), "Clean Monday"))
if self.include_annunciation:
days.append((date(year, 3, 25), "Annunciation"))
if self.include_ash_wednesday:
days.append((self.get_ash_wednesday(year), "Ash Wednesday"))
if self.include_palm_sunday:
days.append((self.get_palm_sunday(year), "Palm Sunday"))
if self.include_holy_thursday:
days.append((self.get_holy_thursday(year), "Holy Thursday"))
if self.include_good_friday:
days.append((self.get_good_friday(year), "Good Friday"))
if self.include_easter_saturday:
days.append((self.get_easter_saturday(year), "Easter Saturday"))
if self.include_easter_sunday:
days.append((self.get_easter_sunday(year), "Easter Sunday"))
if self.include_easter_monday:
days.append((self.get_easter_monday(year), "Easter Monday"))
if self.include_assumption:
days.append((date(year, 8, 15), "Assumption of Mary to Heaven"))
if self.include_all_saints:
days.append((date(year, 11, 1), "All Saints Day"))
if self.include_immaculate_conception:
days.append((date(year, 12, 8), "Immaculate Conception"))
if self.include_christmas:
days.append((date(year, 12, 25), "Christmas Day"))
if self.include_christmas_eve:
days.append((date(year, 12, 24), "Christmas Eve"))
if self.include_boxing_day:
days.append((date(year, 12, 26), self.boxing_day_label))
if self.include_ascension:
days.append((
self.get_ascension_thursday(year), "Ascension Thursday"))
if self.include_whit_monday:
days.append((self.get_whit_monday(year), self.whit_monday_label))
if self.include_whit_sunday:
days.append((self.get_whit_sunday(year), self.whit_sunday_label))
if self.include_corpus_christi:
days.append((self.get_corpus_christi(year), "Corpus Christi"))
return days
class WesternCalendar(Calendar):
"""
General usage calendar for Western countries.
(chiefly Europe and Northern America)
"""
EASTER_METHOD = easter.EASTER_WESTERN
WEEKEND_DAYS = (SAT, SUN)
shift_new_years_day = False
FIXED_HOLIDAYS = (
(1, 1, 'New year'),
)
def get_weekend_days(self):
"Week-end days are SATurday and SUNday."
return self.WEEKEND_DAYS
def get_variable_days(self, year):
days = super(WesternCalendar, self).get_variable_days(year)
new_year = date(year, 1, 1)
if self.shift_new_years_day:
if new_year.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(new_year),
"New Year shift"))
return days
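# Hedged sketch (not part of the original module): concrete country calendars
# are typically built by combining ``WesternCalendar`` with ``ChristianMixin``
# and switching on the relevant ``include_*`` flags.  The selection below is
# an arbitrary example, not any real country's legal calendar.
class _ExampleWesternCountry(WesternCalendar, ChristianMixin):
    "Illustrative only: New year, Good Friday, Easter Monday and Boxing Day."
    include_good_friday = True
    include_easter_monday = True
    include_boxing_day = True
# e.g. _ExampleWesternCountry().holidays(2014) returns a sorted list of
# (date, label) tuples starting with (date(2014, 1, 1), 'New year').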
class OrthodoxMixin(ChristianMixin):
EASTER_METHOD = easter.EASTER_ORTHODOX
class LunarCalendar(Calendar):
"""Calendar including lunar days
"""
FIXED_HOLIDAYS = (
(1, 1, 'Lunar new year'),
)
@staticmethod
def lunar(year, month, day):
return LunarDate(year, month, day).toSolarDate()
class EphemMixin(LunarCalendar):
def calculate_equinoxes(self, year, timezone='UTC'):
""" calculate equinox with time zone """
tz = pytz.timezone(timezone)
d1 = ephem.next_equinox(str(year))
d = ephem.Date(str(d1))
equinox1 = d.datetime() + tz.utcoffset(d.datetime())
d2 = ephem.next_equinox(d1)
d = ephem.Date(str(d2))
equinox2 = d.datetime() + tz.utcoffset(d.datetime())
return (equinox1.date(), equinox2.date())
def solar_term(self, year, degrees, timezone='UTC'):
"""
Returns the date of the solar term for the given longitude
and the given year.
Solar terms are used for Chinese and Taiwanese holidays
(e.g. Qingming Festival in Taiwan).
More information:
- https://en.wikipedia.org/wiki/Solar_term
- https://en.wikipedia.org/wiki/Qingming
This function is adapted from the following topic:
https://answers.launchpad.net/pyephem/+question/110832
"""
twopi = 2 * pi
tz = pytz.timezone(timezone)
# Find out the sun's current longitude.
sun = ephem.Sun(ephem.Date(str(year)))
current_longitude = sun.hlong - pi
# Find approximately the right time of year.
target_longitude = degrees * ephem.degree
difference = (target_longitude - current_longitude) % twopi
t0 = ephem.Date(str(year)) + 365.25 * difference / twopi
# Zero in on the exact moment.
def f(t):
sun.compute(t)
longitude = sun.hlong - pi
return ephem.degrees(target_longitude - longitude).znorm
d = ephem.Date(ephem.newton(f, t0, t0 + ephem.minute))
solar_term = d.datetime() + tz.utcoffset(d.datetime())
return solar_term.date()
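# Hedged sketch (not part of the original module): ``EphemMixin`` is meant to
# be mixed into a calendar that needs astronomical dates.  The timezone and
# the holiday label below are examples only.
class _ExampleEphemCalendar(WesternCalendar, EphemMixin):
    "Illustrative only: add the spring equinox as a holiday."
    def get_variable_days(self, year):
        days = super(_ExampleEphemCalendar, self).get_variable_days(year)
        spring_equinox, _autumn_equinox = self.calculate_equinoxes(year, 'Asia/Tokyo')
        days.append((spring_equinox, "Vernal Equinox Day"))
        return days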
class CalverterMixin(Calendar):
conversion_method = None
ISLAMIC_HOLIDAYS = ()
def __init__(self, *args, **kwargs):
super(CalverterMixin, self).__init__(*args, **kwargs)
self.calverter = Calverter()
if self.conversion_method is None:
raise NotImplementedError
def converted(self, year):
conversion_method = getattr(
self.calverter, 'jd_to_%s' % self.conversion_method)
current = date(year, 1, 1)
days = []
while current.year == year:
julian_day = self.calverter.gregorian_to_jd(
current.year,
current.month,
current.day)
days.append(conversion_method(julian_day))
current = current + timedelta(days=1)
return days
def calverted_years(self, year):
converted = self.converted(year)
generator = (y for y, m, d in converted)
return sorted(list(set(generator)))
def get_islamic_holidays(self):
return self.ISLAMIC_HOLIDAYS
def get_variable_days(self, year):
        warnings.warn('Please take note that, due to arbitrary decisions, '
                      'this Islamic calendar computation may be wrong.')
days = super(CalverterMixin, self).get_variable_days(year)
years = self.calverted_years(year)
conversion_method = getattr(
self.calverter, '%s_to_jd' % self.conversion_method)
for month, day, label in self.get_islamic_holidays():
for y in years:
jd = conversion_method(y, month, day)
g_year, g_month, g_day = self.calverter.jd_to_gregorian(jd)
if g_year == year:
holiday = date(g_year, g_month, g_day)
days.append((holiday, label))
return days
class IslamicMixin(CalverterMixin):
conversion_method = 'islamic'
include_prophet_birthday = False
include_day_after_prophet_birthday = False
include_start_ramadan = False
include_eid_al_fitr = False
length_eid_al_fitr = 1
include_eid_al_adha = False
length_eid_al_adha = 1
include_day_of_sacrifice = False
include_day_of_sacrifice_label = "Eid al-Adha"
include_islamic_new_year = False
include_laylat_al_qadr = False
def get_islamic_holidays(self):
"""Return a list of Islamic (month, day, label) for islamic holidays.
Please take note that these dates must be expressed using the Islamic
Calendar"""
days = list(super(IslamicMixin, self).get_islamic_holidays())
if self.include_islamic_new_year:
days.append((1, 1, "Islamic New Year"))
if self.include_prophet_birthday:
days.append((3, 12, "Prophet's Birthday"))
if self.include_day_after_prophet_birthday:
days.append((3, 13, "Day after Prophet's Birthday"))
if self.include_start_ramadan:
days.append((9, 1, "Start of ramadan"))
if self.include_eid_al_fitr:
for x in range(self.length_eid_al_fitr):
days.append((10, x + 1, "Eid al-Fitr"))
if self.include_eid_al_adha:
for x in range(self.length_eid_al_adha):
days.append((12, x + 10, "Eid al-Adha"))
if self.include_day_of_sacrifice:
days.append((12, 10, self.include_day_of_sacrifice_label))
if self.include_laylat_al_qadr:
warnings.warn("The Islamic holiday named Laylat al-Qadr is decided"
" by the religious authorities. It is not possible"
" to compute it. You'll have to add it manually.")
return tuple(days)
class JalaliMixin(CalverterMixin):
conversion_method = 'jalali'
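# Hedged sketch (not part of the original module): an Islamic calendar is
# assembled by combining ``IslamicMixin`` with a week-end definition and the
# relevant ``include_*`` flags.  The choices below are arbitrary examples,
# and, as the warning in ``CalverterMixin.get_variable_days`` states, the
# computed dates are approximate.
class _ExampleIslamicCalendar(IslamicMixin):
    "Illustrative only."
    WEEKEND_DAYS = (FRI, SAT)
    include_islamic_new_year = True
    include_eid_al_fitr = True
    length_eid_al_fitr = 3
    def get_weekend_days(self):
        return self.WEEKEND_DAYS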
|
|
import threading
import logging
import subprocess
import protocol
import pycountry
import gnupg
import obelisk
import json
import random
from bitcoin import (
apply_multisignatures,
eligius_pushtx,
mk_multisig_script,
mktx,
multisign,
scriptaddr
)
import tornado.ioloop
import tornado.iostream
import tornado.websocket
from twisted.internet import reactor
from backuptool import BackupTool, Backup, BackupJSONEncoder
import trust
class ProtocolHandler(object):
def __init__(self, transport, market_application, handler, db,
loop_instance):
self.market_application = market_application
self.market = self.market_application.market
self.transport = transport
self.handler = handler
self.db = db
self.transport.set_websocket_handler(self)
self.all_messages = (
'peer',
'page',
'peer_remove',
'node_page',
'listing_results',
'listing_result',
'no_listing_result',
'release_funds_tx',
'all'
)
# register on transport events to forward..
self.transport.add_callbacks([
(
msg,
{
'cb': getattr(self, 'on_%s' % msg),
'validator_cb': getattr(self, 'validate_on_%s' % msg)
}
)
for msg in self.all_messages
])
        # Handlers for events coming from the websocket; we shouldn't need this
self._handlers = {
"load_page": self.client_load_page,
"connect": self.client_connect,
"peers": self.client_peers,
"query_page": self.client_query_page,
"review": self.client_review,
"order": self.client_order,
"search": self.client_query_network_for_products,
"shout": self.client_shout,
"get_notaries": self.client_get_notaries,
"add_trusted_notary": self.client_add_trusted_notary,
"add_node": self.client_add_guid,
"remove_trusted_notary": self.client_remove_trusted_notary,
"query_store_products": self.client_query_store_products,
"check_order_count": self.client_check_order_count,
"query_orders": self.client_query_orders,
"query_contracts": self.client_query_contracts,
"stop_server": self.client_stop_server,
"query_messages": self.client_query_messages,
"send_message": self.client_send_message,
"update_settings": self.client_update_settings,
"query_order": self.client_query_order,
"pay_order": self.client_pay_order,
"ship_order": self.client_ship_order,
"release_payment": self.client_release_payment,
"refund_order": self.client_refund_order,
"remove_contract": self.client_remove_contract,
"generate_secret": self.client_generate_secret,
"welcome_dismissed": self.client_welcome_dismissed,
"republish_contracts": self.client_republish_contracts,
"import_raw_contract": self.client_import_raw_contract,
"create_contract": self.client_create_contract,
"clear_dht_data": self.client_clear_dht_data,
"clear_peers_data": self.client_clear_peers_data,
"read_log": self.client_read_log,
"create_backup": self.client_create_backup,
"get_backups": self.get_backups,
"undo_remove_contract": self.client_undo_remove_contract,
}
self.timeouts = []
        # Unused for now; remove it later if it stays unused.
self.loop = loop_instance
self.log = logging.getLogger(
'[%s] %s' % (self.transport.market_id, self.__class__.__name__)
)
def validate_on_page(self, *data):
self.log.debug('Validating on page message.')
keys = ("senderGUID", "sin")
return all(k in data for k in keys)
def on_page(self, page):
guid = page.get('senderGUID')
self.log.info(page)
sin = page.get('sin')
self.log.info("Received store info from node: %s", page)
if sin and page:
self.market.pages[sin] = page
# TODO: allow async calling in different thread
def reputation_pledge_retrieved(amount, page):
self.log.debug(
'Received reputation pledge amount %s for guid %s',
amount, guid
)
SATOSHIS_IN_BITCOIN = 100000000
bitcoins = float(amount) / SATOSHIS_IN_BITCOIN
bitcoins = round(bitcoins, 4)
self.market.pages[sin]['reputation_pledge'] = bitcoins
self.send_to_client(
None, {'type': 'reputation_pledge_update', 'value': bitcoins}
)
trust.get_global(
guid,
lambda amount, page=page: reputation_pledge_retrieved(amount, page)
)
def send_opening(self):
peers = self.get_peers()
countryCodes = []
for country in pycountry.countries:
countryCodes.append({"code": country.alpha2, "name": country.name})
settings = self.market.get_settings()
message = {
'type': 'myself',
'pubkey': settings.get('pubkey'),
'peers': peers,
'settings': settings,
'guid': self.transport.guid,
'sin': self.transport.sin,
'uri': self.transport.uri,
'countryCodes': countryCodes,
}
self.send_to_client(None, message)
burnAddr = trust.burnaddr_from_guid(self.transport.guid)
def found_unspent(amount):
self.send_to_client(None, {
'type': 'burn_info_available',
'amount': amount,
'addr': burnAddr
})
trust.get_unspent(burnAddr, found_unspent)
def client_read_log(self, socket_handler, msg):
self.market.p = subprocess.Popen(
["tail", "-f", "logs/development.log", "logs/production.log"],
stdout=subprocess.PIPE)
self.stream = tornado.iostream.PipeIOStream(
self.market.p.stdout.fileno()
)
self.stream.read_until("\n", self.line_from_nettail)
def line_from_nettail(self, data):
self.send_to_client(None, {"type": "log_output", "line": data})
self.stream.read_until("\n", self.line_from_nettail)
def validate_on_listing_results(self, *data):
self.log.debug('Validating on listing results message.')
return "contracts" in data
def on_listing_results(self, msg):
self.log.debug('Found results %s', msg)
self.send_to_client(None, {
"type": "store_contracts",
"products": msg['contracts']
})
def validate_on_no_listing_result(self, *data):
self.log.debug('Validating on no listing result message.')
return True
def on_no_listing_result(self, msg):
self.log.debug('No listings found')
self.send_to_client(None, {
"type": "no_listings_found"
})
def validate_on_listing_result(self, *data):
self.log.debug('Validating on listing result message.')
return True
def on_listing_result(self, msg):
self.log.debug('Found result %s', msg)
self.send_to_client(None, {
"type": "store_contract",
"contract": msg
})
def client_stop_server(self, socket_handler, msg):
self.log.error('Killing OpenBazaar')
self.market_application.shutdown()
def client_load_page(self, socket_handler, msg):
self.send_to_client(None, {"type": "load_page"})
def client_add_trusted_notary(self, socket_handler, msg):
self.log.info('Adding trusted notary %s', msg)
self.market.add_trusted_notary(msg.get('guid'), msg.get('nickname'))
def client_add_guid(self, socket_handler, msg):
self.log.info('Adding node by guid %s', msg)
def cb(msg):
self.get_peers()
self.transport.dht.iterativeFindNode(msg.get('guid'), cb)
def client_remove_trusted_notary(self, socket_handler, msg):
self.log.info('Removing trusted notary %s', msg)
self.market.remove_trusted_notary(msg.get('guid'))
def client_get_notaries(self, socket_handler, msg):
self.log.debug('Retrieving notaries')
notaries = self.market.get_notaries()
self.log.debug('Getting notaries %s', notaries)
self.send_to_client(None, {
"type": "settings_notaries",
"notaries": notaries
})
def client_clear_dht_data(self, socket_handler, msg):
self.log.debug('Clearing DHT Data')
self.db.deleteEntries("datastore")
def client_clear_peers_data(self, socket_handler, msg):
self.log.debug('Clearing Peers Data')
self.db.deleteEntries("peers")
# Requests coming from the client
def client_connect(self, socket_handler, msg):
self.log.info("Connection command: %s", msg)
self.transport.connect(msg['uri'], lambda x: None)
self.send_ok()
def client_peers(self, socket_handler, msg):
self.log.info("Peers command")
self.send_to_client(None, {"type": "peers", "peers": self.get_peers()})
def client_welcome_dismissed(self, socket_handler, msg):
self.market.disable_welcome_screen()
def client_undo_remove_contract(self, socket_handler, msg):
self.market.undo_remove_contract(msg.get('contract_id'))
def client_check_order_count(self, socket_handler, msg):
self.log.debug('Checking order count')
orders = self.db.selectEntries(
"orders",
{
"market_id": self.transport.market_id,
"state": "Waiting for Payment"
}
)
self.send_to_client(
None,
{"type": "order_count", "count": len(orders)}
)
def refresh_peers(self):
self.log.info("Peers command")
self.send_to_client(None, {"type": "peers", "peers": self.get_peers()})
def client_query_page(self, socket_handler, msg):
findGUID = msg['findGUID']
query_id = random.randint(0, 1000000)
self.timeouts.append(query_id)
def cb(msg, query_id):
self.log.info('Received a query page response: %s', query_id)
self.market.query_page(
findGUID,
lambda msg, query_id=query_id: cb(msg, query_id)
)
def client_query_orders(self, socket_handler=None, msg=None):
self.log.info("Querying for Orders %s", msg)
        if msg is not None and 'page' in msg:
page = msg['page']
else:
page = 0
if msg is not None and 'merchant' in msg:
if msg['merchant'] == 1:
orders = self.market.orders.get_orders(page, True)
elif msg['merchant'] == 2:
orders = self.market.orders.get_orders(
page, merchant=None, notarizations=True
)
else:
orders = self.market.orders.get_orders(page, merchant=False)
else:
orders = self.market.orders.get_orders(page)
self.send_to_client(None, {
"type": "myorders",
"page": page,
"total": orders['total'],
"orders": orders['orders']
})
def client_query_contracts(self, socket_handler, msg):
self.log.info("Querying for Contracts")
page = msg['page'] if 'page' in msg else 0
contracts = self.market.get_contracts(page)
self.send_to_client(None, {
"type": "contracts",
"contracts": contracts
})
def client_query_messages(self, socket_handler, msg):
self.log.info("Querying for Messages")
# Query bitmessage for messages
messages = self.market.get_messages()
self.log.info('Bitmessages: %s', messages)
self.send_to_client(None, {"type": "messages", "messages": messages})
def client_send_message(self, socket_handler, msg):
self.log.info("Sending message")
# Send message with market's bitmessage
self.market.send_message(msg)
def client_republish_contracts(self, socket_handler, msg):
self.log.info("Republishing contracts")
self.market.republish_contracts()
def client_import_raw_contract(self, socket_handler, contract):
self.log.info(
"Importing New Contract "
"(NOT IMPLEMENTED! TODO: Market.import_contract(contract)"
)
# Get a single order's info
def client_query_order(self, socket_handler, msg):
order = self.market.orders.get_order(msg['orderId'])
self.send_to_client(None, {"type": "orderinfo", "order": order})
def client_update_settings(self, socket_handler, msg):
self.send_to_client(None, {"type": "settings", "values": msg})
if msg['settings'].get('btc_pubkey'):
del msg['settings']['btc_pubkey']
self.market.save_settings(msg['settings'])
def client_create_contract(self, socket_handler, contract):
self.log.info("New Contract: %s", contract)
self.market.save_contract(contract)
def client_remove_contract(self, socket_handler, msg):
self.log.info("Remove contract: %s", msg)
self.market.remove_contract(msg)
def client_pay_order(self, socket_handler, msg):
self.log.info("Marking Order as Paid: %s", msg)
order = self.market.orders.get_order(msg['orderId'])
order['shipping_address'] = self.market.shipping_address()
# Send to exchange partner
self.market.orders.pay_order(order, msg['orderId'])
def client_ship_order(self, socket_handler, msg):
self.log.info("Shipping order out: %s", msg)
order = self.market.orders.get_order(msg['orderId'])
# Send to exchange partner
self.market.orders.ship_order(
order, msg['orderId'], msg['paymentAddress']
)
def client_refund_order(self, socket_handler, msg):
self.log.info('Refunding payment and cancelling order')
# Get Order
order = self.market.orders.get_order(msg['orderId'])
contract = order['signed_contract_body']
# Find Seller Data in Contract
offer_data = ''.join(contract.split('\n')[8:])
index_of_seller_signature = offer_data.find(
'- - -----BEGIN PGP SIGNATURE-----', 0, len(offer_data)
)
offer_data_json = offer_data[:index_of_seller_signature]
offer_data_json = json.loads(offer_data_json)
self.log.info('Offer Data: %s', offer_data_json)
# Find Buyer Data in Contract
bid_data_index = offer_data.find(
'"Buyer"', index_of_seller_signature, len(offer_data)
)
end_of_bid_index = offer_data.find(
'- -----BEGIN PGP SIGNATURE', bid_data_index, len(offer_data)
)
bid_data_json = "{"
bid_data_json += offer_data[bid_data_index:end_of_bid_index]
bid_data_json = json.loads(bid_data_json)
# Find Notary Data in Contract
notary_data_index = offer_data.find(
'"Notary"', end_of_bid_index, len(offer_data)
)
end_of_notary_index = offer_data.find(
'-----BEGIN PGP SIGNATURE', notary_data_index, len(offer_data)
)
notary_data_json = "{"
notary_data_json += offer_data[notary_data_index:end_of_notary_index]
notary_data_json = json.loads(notary_data_json)
try:
client = obelisk.ObeliskOfLightClient(
'tcp://obelisk.coinkite.com:9091'
)
seller = offer_data_json['Seller']
buyer = bid_data_json['Buyer']
notary = notary_data_json['Notary']
pubkeys = [
seller['seller_BTC_uncompressed_pubkey'],
buyer['buyer_BTC_uncompressed_pubkey'],
notary['notary_BTC_uncompressed_pubkey']
]
script = mk_multisig_script(pubkeys, 2, 3)
multi_address = scriptaddr(script)
def cb(ec, history, order):
settings = self.market.get_settings()
private_key = settings.get('privkey')
if ec is not None:
self.log.error("Error fetching history: %s", ec)
# TODO: Send error message to GUI
return
# Create unsigned transaction
unspent = [row[:4] for row in history if row[4] is None]
# Send all unspent outputs (everything in the address) minus
# the fee
total_amount = 0
inputs = []
for row in unspent:
assert len(row) == 4, 'Obelisk returned a wonky row'
inputs.append("%s:%s" % (row[0].encode('hex'), row[1]))
value = row[3]
total_amount += value
# Constrain fee so we don't get negative amount to send
fee = min(total_amount, 10000)
send_amount = total_amount - fee
payment_output = order['payment_address']
tx = mktx(inputs, ["%s:%s" % (payment_output, send_amount)])
signatures = [multisign(tx, x, script, private_key)
for x in range(len(inputs))]
self.market.release_funds_to_merchant(
buyer['buyer_order_id'], tx, script, signatures,
order.get('merchant')
)
def get_history():
client.fetch_history(
multi_address,
lambda ec, history, order=order: cb(ec, history, order))
reactor.callFromThread(get_history)
except Exception as e:
self.log.error('%s', e)
def client_release_payment(self, socket_handler, msg):
self.log.info('Releasing payment to Merchant %s', msg)
order = self.market.orders.get_order(msg['orderId'])
contract = order['signed_contract_body']
# Find Seller Data in Contract
offer_data = ''.join(contract.split('\n')[8:])
index_of_seller_signature = offer_data.find(
'- - -----BEGIN PGP SIGNATURE-----', 0, len(offer_data)
)
offer_data_json = offer_data[0:index_of_seller_signature]
offer_data_json = json.loads(offer_data_json)
self.log.info('Offer Data: %s', offer_data_json)
# Find Buyer Data in Contract
bid_data_index = offer_data.find(
'"Buyer"', index_of_seller_signature, len(offer_data)
)
end_of_bid_index = offer_data.find(
'- -----BEGIN PGP SIGNATURE', bid_data_index, len(offer_data)
)
bid_data_json = "{"
bid_data_json += offer_data[bid_data_index:end_of_bid_index]
bid_data_json = json.loads(bid_data_json)
# Find Notary Data in Contract
notary_data_index = offer_data.find(
'"Notary"', end_of_bid_index, len(offer_data)
)
end_of_notary_index = offer_data.find(
'-----BEGIN PGP SIGNATURE', notary_data_index, len(offer_data)
)
notary_data_json = "{"
notary_data_json += offer_data[notary_data_index:end_of_notary_index]
notary_data_json = json.loads(notary_data_json)
self.log.info('Notary Data: %s', notary_data_json)
try:
client = obelisk.ObeliskOfLightClient(
'tcp://obelisk.coinkite.com:9091'
)
seller = offer_data_json['Seller']
buyer = bid_data_json['Buyer']
notary = notary_data_json['Notary']
pubkeys = [
seller['seller_BTC_uncompressed_pubkey'],
buyer['buyer_BTC_uncompressed_pubkey'],
notary['notary_BTC_uncompressed_pubkey']
]
script = mk_multisig_script(pubkeys, 2, 3)
multi_address = scriptaddr(script)
def cb(ec, history, order):
settings = self.market.get_settings()
private_key = settings.get('privkey')
if ec is not None:
self.log.error("Error fetching history: %s", ec)
# TODO: Send error message to GUI
return
# Create unsigned transaction
unspent = [row[:4] for row in history if row[4] is None]
# Send all unspent outputs (everything in the address) minus
# the fee
total_amount = 0
inputs = []
for row in unspent:
assert len(row) == 4
inputs.append(
str(row[0].encode('hex')) + ":" + str(row[1])
)
value = row[3]
total_amount += value
# Constrain fee so we don't get negative amount to send
fee = min(total_amount, 10000)
send_amount = total_amount - fee
payment_output = order['payment_address']
tx = mktx(
inputs, [str(payment_output) + ":" + str(send_amount)]
)
signatures = []
for x in range(0, len(inputs)):
ms = multisign(tx, x, script, private_key)
signatures.append(ms)
                self.log.debug('Signatures: %s', signatures)
self.market.release_funds_to_merchant(
buyer['buyer_order_id'],
tx, script, signatures,
order.get('merchant')
)
def get_history():
client.fetch_history(
multi_address,
lambda ec, history, order=order: cb(ec, history, order)
)
reactor.callFromThread(get_history)
except Exception as e:
self.log.error('%s', e)
def validate_on_release_funds_tx(self, *data):
self.log.debug('Validating on release funds tx message.')
keys = ("senderGUID", "buyer_order_id", "script", "tx")
return all(k in data for k in keys)
def on_release_funds_tx(self, msg):
self.log.info('Receiving signed tx from buyer')
buyer_order_id = "%s-%s" % (msg['senderGUID'], msg['buyer_order_id'])
order = self.market.orders.get_order(buyer_order_id, by_buyer_id=True)
contract = order['signed_contract_body']
# Find Seller Data in Contract
offer_data = ''.join(contract.split('\n')[8:])
index_of_seller_signature = offer_data.find(
'- - -----BEGIN PGP SIGNATURE-----', 0, len(offer_data)
)
offer_data_json = offer_data[0:index_of_seller_signature]
offer_data_json = json.loads(offer_data_json)
self.log.info('Offer Data: %s', offer_data_json)
# Find Buyer Data in Contract
bid_data_index = offer_data.find(
'"Buyer"', index_of_seller_signature, len(offer_data)
)
end_of_bid_index = offer_data.find(
'- -----BEGIN PGP SIGNATURE', bid_data_index, len(offer_data)
)
bid_data_json = "{"
bid_data_json += offer_data[bid_data_index:end_of_bid_index]
bid_data_json = json.loads(bid_data_json)
# Find Notary Data in Contract
notary_data_index = offer_data.find(
'"Notary"', end_of_bid_index, len(offer_data)
)
end_of_notary_index = offer_data.find(
'-----BEGIN PGP SIGNATURE', notary_data_index, len(offer_data)
)
notary_data_json = "{"
notary_data_json += offer_data[notary_data_index:end_of_notary_index]
notary_data_json = json.loads(notary_data_json)
self.log.info('Notary Data: %s', notary_data_json)
try:
client = obelisk.ObeliskOfLightClient(
'tcp://obelisk.coinkite.com:9091'
)
script = msg['script']
tx = msg['tx']
multi_addr = scriptaddr(script)
def cb(ec, history, order):
if ec is not None:
self.log.error("Error fetching history: %s", ec)
# TODO: Send error message to GUI
return
unspent = [row[:4] for row in history if row[4] is None]
# Send all unspent outputs (everything in the address) minus
# the fee
inputs = []
for row in unspent:
assert len(row) == 4
inputs.append(
str(row[0].encode('hex')) + ":" + str(row[1])
)
seller_signatures = []
                self.log.debug('Private key: %s', self.transport.settings['privkey'])
for x in range(0, len(inputs)):
ms = multisign(
tx, x, script, self.transport.settings['privkey']
)
                    self.log.debug('Seller signature: %s', ms)
seller_signatures.append(ms)
tx2 = apply_multisignatures(
tx, 0, script, seller_signatures[0], msg['signatures'][0]
)
                self.log.debug('Final script: %s', tx2)
                self.log.info('Sent %s', eligius_pushtx(tx2))
self.send_to_client(
None,
{
"type": "order_notify",
"msg": "Funds were released for your sale."
}
)
def get_history():
client.fetch_history(
multi_addr,
lambda ec, history, order=order: cb(ec, history, order)
)
reactor.callFromThread(get_history)
except Exception as e:
self.log.error('%s', e)
def client_generate_secret(self, socket_handler, msg):
self.transport._generate_new_keypair()
self.send_opening()
def client_order(self, socket_handler, msg):
self.market.orders.on_order(msg)
def client_review(self, socket_handler, msg):
pubkey = msg['pubkey'].decode('hex')
text = msg['text']
rating = msg['rating']
self.market.reputation.create_review(pubkey, text, rating)
# Search for markets ATM
# TODO: multi-faceted search support
def client_search(self, socket_handler, msg):
self.log.info("[Search] %s", msg)
self.transport.dht.iterativeFindValue(
msg['key'], callback=self.on_node_search_value
)
def client_query_network_for_products(self, socket_handler, msg):
self.log.info("Querying for Contracts %s", msg)
self.transport.dht.find_listings_by_keyword(
self.transport,
msg['key'].upper(),
callback=self.on_find_products
)
def client_query_store_products(self, socket_handler, msg):
self.log.info("Searching network for contracts")
self.transport.dht.find_listings(
self.transport,
msg['key'],
callback=self.on_find_products_by_store
)
def client_create_backup(self, socket_handler, msg):
"""Currently hard-coded for testing: need to find out Installation path.
Talk to team about right location for backup files
they might have to be somewhere outside the installation path
as some OSes might not allow the modification of the installation
folder
e.g. MacOS won't allow for changes if the .app has been signed.
and all files created by the app, have to be outside, usually at
~/Library/Application Support/OpenBazaar/backups ??
"""
def on_backup_done(backupPath):
self.log.info('Backup successfully created at %s', backupPath)
self.send_to_client(None,
{'type': 'create_backup_result',
'result': 'success',
'detail': backupPath})
def on_backup_error(error):
self.log.info('Backup error: %s', error.strerror)
self.send_to_client(None,
{'type': 'create_backup_result',
'result': 'failure',
'detail': error.strerror})
BackupTool.backup(BackupTool.get_installation_path(),
BackupTool.get_backup_path(),
on_backup_done,
on_backup_error)
def get_backups(self, socket_handler, msg=None):
if "127.0.0.1" == socket_handler.request.remote_ip:
try:
backups = [json.dumps(x, cls=BackupJSONEncoder)
for x in
Backup.get_backups(BackupTool.get_backup_path())]
self.send_to_client(None, {'type': 'on_get_backups_response',
'result': 'success',
'backups': backups
})
except Exception:
self.send_to_client(None, {'type': 'on_get_backups_response',
'result': 'failure'})
def on_find_products_by_store(self, results):
self.log.info('Found Contracts: %s', type(results))
self.log.info(results)
if len(results) > 0 and type(results['data']) == unicode:
results = json.loads(results[0])
self.log.info(results)
if 'type' not in results:
return
else:
self.log.debug('Results: %s', results['contracts'])
if len(results) > 0 and 'data' in results:
data = results['data']
contracts = data['contracts']
signature = results['signature']
self.log.info('Signature: %s', signature)
# Go get listing metadata and then send it to the GUI
for contract in contracts:
self.transport.dht.iterativeFindValue(
contract,
callback=lambda msg, key=contract: (
self.on_node_search_value(msg, key)
)
)
def on_find_products(self, results):
self.log.info('Found Contracts: %s', type(results))
self.log.info(results)
if len(results):
if 'listings' in results:
# TODO: Validate signature of listings matches data
# Go get listing metadata and then send it to the GUI
for contract in results['listings']:
self.log.debug('Results contract %s', contract)
key = contract.get('key', contract)
self.transport.dht.iterativeFindValue(
key,
callback=lambda msg, key=key: (
self.on_global_search_value(msg, key)
)
)
def client_shout(self, socket_handler, msg):
msg['uri'] = self.transport.uri
msg['pubkey'] = self.transport.pubkey
msg['senderGUID'] = self.transport.guid
msg['senderNick'] = self.transport.nickname
self.transport.send(protocol.shout(msg))
def on_node_search_value(self, results, key):
self.log.debug('Listing Data: %s %s', results, key)
# Import gpg pubkey
gpg = gnupg.GPG()
# Retrieve JSON from the contract
# 1) Remove PGP Header
contract_data = ''.join(results.split('\n')[3:])
index_of_signature = contract_data.find(
'-----BEGIN PGP SIGNATURE-----', 0, len(contract_data)
)
contract_data_json = contract_data[0:index_of_signature]
try:
contract_data_json = json.loads(contract_data_json)
seller = contract_data_json.get('Seller')
seller_pubkey = seller.get('seller_PGP')
gpg.import_keys(seller_pubkey)
v = gpg.verify(results)
if v:
self.send_to_client(None, {
"type": "new_listing",
"data": contract_data_json,
"key": key,
"rawContract": results
})
else:
self.log.error('Could not verify signature of contract.')
except Exception:
self.log.debug('Error getting JSON contract')
def on_global_search_value(self, results, key):
self.log.info('global search: %s %s', results, key)
if results and type(results) is not list:
self.log.debug('Listing Data: %s %s', results, key)
# Import gpg pubkey
gpg = gnupg.GPG()
# Retrieve JSON from the contract
# 1) Remove PGP Header
contract_data = ''.join(results.split('\n')[3:])
index_of_signature = contract_data.find(
'-----BEGIN PGP SIGNATURE-----', 0, len(contract_data)
)
contract_data_json = contract_data[0:index_of_signature]
try:
contract_data_json = json.loads(contract_data_json)
seller_pubkey = contract_data_json.get(
'Seller'
).get(
'seller_PGP'
)
gpg.import_keys(seller_pubkey)
v = gpg.verify(results)
if v:
seller = contract_data_json.get('Seller')
contract_guid = seller.get('seller_GUID')
if contract_guid == self.transport.guid:
nickname = self.transport.nickname
else:
routing_table = self.transport.dht.routingTable
peer = routing_table.getContact(contract_guid)
nickname = peer.nickname if peer is not None else ""
self.send_to_client(None, {
"type": "global_search_result",
"data": contract_data_json,
"key": key,
"rawContract": results,
"nickname": nickname
})
else:
self.log.error('Could not verify signature of contract.')
except Exception:
self.log.debug('Error getting JSON contract')
else:
self.log.info('No results')
def on_node_search_results(self, results):
if len(results) > 1:
self.send_to_client(None, {
"type": "peers",
"peers": self.get_peers()
})
else:
# Add peer to list of markets
self.on_peer(results[0])
# Load page for the store
self.market.query_page(results[0].guid)
def validate_on_peer(self, *data):
self.log.debug('Validating on node peer message.')
return "address" in data
# messages coming from "the market"
def on_peer(self, peer):
self.log.info("Add peer: %s", peer)
response = {'type': 'peer',
'pubkey': peer.pub
if peer.pub
else 'unknown',
'guid': peer.guid
if peer.guid
else '',
'uri': peer.address}
self.send_to_client(None, response)
def validate_on_peer_remove(self, *data):
self.log.debug('Validating on node remove peer message.')
return True
def on_peer_remove(self, msg):
self.send_to_client(None, msg)
def validate_on_node_page(self, *data):
self.log.debug('Validating on node page message.')
return True
def on_node_page(self, page):
self.send_to_client(None, page)
def validate_on_all(self, *data):
self.log.debug('Validating on node message.')
return True
def on_all(self, *args):
first = args[0]
if isinstance(first, dict):
self.send_to_client(None, first)
else:
self.log.info("can't format")
# send a message
def send_to_client(self, error, result):
assert error is None or type(error) == str
response = {
"id": random.randint(0, 1000000),
"result": result
}
self.log.datadump('Sending to web client: %s', result)
if error:
response["error"] = error
self.handler.queue_response(response)
def send_ok(self):
self.send_to_client(None, {"type": "ok"})
    # handle a request
def handle_request(self, socket_handler, request):
command = request["command"]
self.log.info('(I) ws.ProtocolHandler.handle_request of: %s', command)
if command not in self._handlers:
return False
params = request["params"]
# Create callback handler to write response to the socket.
self.log.debug('found a handler!')
self._handlers[command](socket_handler, params)
return True
def get_peers(self):
peers = []
for peer in self.transport.dht.activePeers:
if hasattr(peer, 'address'):
peer_item = {'uri': peer.address}
if peer.pub:
peer_item['pubkey'] = peer.pub
else:
peer_item['pubkey'] = 'unknown'
peer_item['guid'] = peer.guid
if peer.guid:
peer_item['sin'] = obelisk.EncodeBase58Check(
                        '\x0F\x02%s' % peer.guid.decode('hex')
)
peer_item['nick'] = peer.nickname
self.log.debug('Peer Nick %s', peer)
peers.append(peer_item)
return peers
class WebSocketHandler(tornado.websocket.WebSocketHandler):
# Set of WebsocketHandler
listeners = set()
# Protects listeners
listen_lock = threading.Lock()
def initialize(self, transport, market_application, db):
# pylint: disable=arguments-differ
# FIXME: Arguments shouldn't differ.
self.loop = tornado.ioloop.IOLoop.instance()
self.log = logging.getLogger(self.__class__.__name__)
self.log.info("Initialize websockethandler")
self.market_application = market_application
self.market = self.market_application.market
self.app_handler = ProtocolHandler(
transport,
self.market_application,
self,
db,
self.loop
)
self.transport = transport
def open(self):
self.log.info('Websocket open')
self.app_handler.send_opening()
with WebSocketHandler.listen_lock:
self.listeners.add(self)
self.connected = True
# self.connected not used for any logic, might remove if unnecessary
def on_close(self):
self.log.info("Websocket closed")
disconnect_msg = {
'command': 'disconnect_client',
'id': 0,
'params': []
}
self.connected = False
self.app_handler.handle_request(self, disconnect_msg)
with WebSocketHandler.listen_lock:
try:
self.listeners.remove(self)
except Exception:
self.log.error('Cannot remove socket listener')
@staticmethod
def _check_request(request):
return "command" in request and "id" in request and \
"params" in request and type(request["params"]) == dict
def on_message(self, message):
self.log.info('[On Message]: %s', message)
try:
request = json.loads(message)
        except Exception:
            logging.error("Error decoding message: %s", message, exc_info=True)
            return
# Check request is correctly formed.
if not self._check_request(request):
logging.error("Malformed request: %s", request, exc_info=True)
return
if self.app_handler.handle_request(self, request):
return
def _send_response(self, response):
if self.ws_connection:
self.write_message(json.dumps(response))
def queue_response(self, response):
def send_response(*args):
self._send_response(response)
try:
            # write_message and the socket are not thread safe; schedule on the IOLoop
self.loop.current().add_callback(send_response)
except Exception:
logging.error("Error adding callback", exc_info=True)
|
|
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing Neutron Routers.
"""
import logging
from django.urls import reverse
from django.urls import reverse_lazy
from django.utils.translation import gettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard import api
LOG = logging.getLogger(__name__)
class CreateForm(forms.SelfHandlingForm):
name = forms.CharField(max_length=255, label=_("Router Name"),
required=False)
admin_state_up = forms.BooleanField(
label=_("Enable Admin State"),
initial=True,
required=False,
help_text=_("If checked, the router will be enabled."))
external_network = forms.ThemableChoiceField(label=_("External Network"),
required=False)
enable_snat = forms.BooleanField(label=_("Enable SNAT"),
initial=True,
required=False)
mode = forms.ThemableChoiceField(label=_("Router Type"))
ha = forms.ThemableChoiceField(label=_("High Availability Mode"))
az_hints = forms.MultipleChoiceField(
label=_("Availability Zone Hints"),
required=False,
help_text=_("Availability Zones where the router may be scheduled. "
"Leaving this unset is equivalent to selecting all "
"Availability Zones"))
failure_url = 'horizon:project:routers:index'
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
self.dvr_allowed = api.neutron.get_feature_permission(self.request,
"dvr", "create")
if self.dvr_allowed:
mode_choices = [('server_default', _('Use Server Default')),
('centralized', _('Centralized')),
('distributed', _('Distributed'))]
self.fields['mode'].choices = mode_choices
else:
del self.fields['mode']
self.ha_allowed = api.neutron.get_feature_permission(self.request,
"l3-ha", "create")
if self.ha_allowed:
ha_choices = [('server_default', _('Use Server Default')),
('enabled', _('Enable HA mode')),
('disabled', _('Disable HA mode'))]
self.fields['ha'].choices = ha_choices
else:
del self.fields['ha']
networks = self._get_network_list(request)
if networks:
self.fields['external_network'].choices = networks
else:
del self.fields['external_network']
self.enable_snat_allowed = self.initial['enable_snat_allowed']
if (not networks or not self.enable_snat_allowed):
del self.fields['enable_snat']
try:
az_supported = api.neutron.is_extension_supported(
self.request, 'router_availability_zone')
if az_supported:
zones = api.neutron.list_availability_zones(
self.request, 'router', 'available')
self.fields['az_hints'].choices = [(zone['name'], zone['name'])
for zone in zones]
else:
del self.fields['az_hints']
except Exception:
msg = _("Failed to get availability zone list.")
exceptions.handle(self.request, msg)
del self.fields['az_hints']
def _get_network_list(self, request):
search_opts = {'router:external': True}
try:
networks = api.neutron.network_list(request, **search_opts)
except Exception as e:
LOG.info('Failed to get network list: %s', e)
msg = _('Failed to get network list.')
messages.warning(request, msg)
networks = []
choices = [(network.id, network.name or network.id)
for network in networks]
if choices:
choices.insert(0, ("", _("Select network")))
return choices
def handle(self, request, data):
try:
params = {'name': data['name'],
'admin_state_up': data['admin_state_up']}
            # NOTE: the admin form allows specifying tenant_id.
            # Handling it here keeps the logic in one place.
if 'tenant_id' in data and data['tenant_id']:
params['tenant_id'] = data['tenant_id']
if 'external_network' in data and data['external_network']:
params['external_gateway_info'] = {'network_id':
data['external_network']}
if self.enable_snat_allowed:
params['external_gateway_info']['enable_snat'] = \
data['enable_snat']
if 'az_hints' in data and data['az_hints']:
params['availability_zone_hints'] = data['az_hints']
if (self.dvr_allowed and data['mode'] != 'server_default'):
params['distributed'] = (data['mode'] == 'distributed')
if (self.ha_allowed and data['ha'] != 'server_default'):
params['ha'] = (data['ha'] == 'enabled')
router = api.neutron.router_create(request, **params)
message = (_('Router %s was successfully created.') %
router.name_or_id)
messages.success(request, message)
return router
except Exception as exc:
LOG.info('Failed to create router: %s', exc)
if exc.status_code == 409:
msg = _('Quota exceeded for resource router.')
else:
if data["name"]:
msg = _('Failed to create router "%s".') % data['name']
else:
msg = _('Failed to create router.')
redirect = reverse(self.failure_url)
exceptions.handle(request, msg, redirect=redirect)
return False
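# Illustrative sketch (not part of Horizon): the keyword arguments that
# CreateForm.handle() assembles for api.neutron.router_create() on a typical
# submission. All values shown here are hypothetical.
def _example_router_create_params():
    return {
        'name': 'router1',
        'admin_state_up': True,
        'external_gateway_info': {'network_id': 'ext-net-id',
                                  'enable_snat': True},
        'availability_zone_hints': ['nova'],
        'distributed': False,
        'ha': True,
    }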
class UpdateForm(forms.SelfHandlingForm):
name = forms.CharField(label=_("Name"), required=False)
admin_state = forms.BooleanField(
label=_("Enable Admin State"),
required=False,
help_text=_("If checked, the router will be enabled."))
mode = forms.ThemableChoiceField(label=_("Router Type"))
ha = forms.BooleanField(label=_("High Availability Mode"), required=False)
redirect_url = reverse_lazy('horizon:project:routers:index')
def __init__(self, request, *args, **kwargs):
super().__init__(request, *args, **kwargs)
self.dvr_allowed = api.neutron.get_feature_permission(self.request,
"dvr", "update")
if not self.dvr_allowed:
del self.fields['mode']
elif self.initial.get('mode') == 'distributed':
# Neutron supports only changing from centralized to
# distributed now.
mode_choices = [('distributed', _('Distributed'))]
self.fields['mode'].widget = forms.TextInput(attrs={'readonly':
'readonly'})
self.fields['mode'].choices = mode_choices
else:
mode_choices = [('centralized', _('Centralized')),
('distributed', _('Distributed'))]
self.fields['mode'].choices = mode_choices
# TODO(amotoki): Due to Neutron Bug 1378525, Neutron disables
# PUT operation. It will be fixed in Kilo cycle.
# self.ha_allowed = api.neutron.get_feature_permission(
# self.request, "l3-ha", "update")
self.ha_allowed = False
if not self.ha_allowed:
del self.fields['ha']
def handle(self, request, data):
try:
params = {'admin_state_up': data['admin_state'],
'name': data['name']}
if self.dvr_allowed:
params['distributed'] = (data['mode'] == 'distributed')
if self.ha_allowed:
params['ha'] = data['ha']
router = api.neutron.router_update(request,
self.initial['router_id'],
**params)
msg = _('Router %s was successfully updated.') % router.name_or_id
messages.success(request, msg)
return router
except Exception as exc:
LOG.info('Failed to update router %(id)s: %(exc)s',
{'id': self.initial['router_id'], 'exc': exc})
name_or_id = data['name'] or self.initial['router_id']
msg = _('Failed to update router %s') % name_or_id
exceptions.handle(request, msg, redirect=self.redirect_url)
|
|
# Copyright 2020, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for constructing reconstruction models from Keras models."""
import collections
from typing import Iterable, List
import tensorflow as tf
import tensorflow_federated as tff
from reconstruction import reconstruction_model
def from_keras_model(
keras_model: tf.keras.Model,
*, # Caller passes below args by name.
global_layers: Iterable[tf.keras.layers.Layer],
local_layers: Iterable[tf.keras.layers.Layer],
input_spec,
) -> reconstruction_model.ReconstructionModel:
"""Builds a `ReconstructionModel` from a `tf.keras.Model`.
The `ReconstructionModel` returned by this function uses `keras_model` for
its forward pass and autodifferentiation steps. During reconstruction,
variables in `local_layers` are initialized and trained. Post-reconstruction,
variables in `global_layers` are trained and aggregated on the server.
Args:
keras_model: A `tf.keras.Model` object that is not compiled.
global_layers: Iterable of global layers to be aggregated across users. All
trainable and non-trainable model variables that can be aggregated on the
server should be included in these layers.
local_layers: Iterable of local layers not shared with the server. All
trainable and non-trainable model variables that should not be aggregated
on the server should be included in these layers.
input_spec: A structure of `tf.TensorSpec`s specifying the type of arguments
the model expects. Notice this must be a compound structure of two
elements, specifying both the data fed into the model to generate
predictions, as its first element, as well as the expected type of the
ground truth as its second.
Returns:
A `ReconstructionModel` object.
Raises:
TypeError: If `keras_model` is not an instance of `tf.keras.Model`.
ValueError: If `keras_model` was compiled.
"""
if not isinstance(keras_model, tf.keras.Model):
    raise TypeError('Expected `keras_model` to be an instance of '
                    '`tf.keras.Model`, found {}.'.format(type(keras_model)))
if len(input_spec) != 2:
raise ValueError('The top-level structure in `input_spec` must contain '
'exactly two elements, as it must specify type '
'information for both inputs to and predictions from the '
'model.')
if keras_model._is_compiled: # pylint: disable=protected-access
raise ValueError('`keras_model` must not be compiled')
return _KerasReconstructionModel(
inner_model=keras_model,
global_layers=global_layers,
local_layers=local_layers,
input_spec=input_spec)
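# Illustrative usage sketch (not part of the library API): split a small Keras
# model into one global layer (aggregated on the server) and one local layer
# (reconstructed on each client). Layer sizes and the input spec are arbitrary.
def _example_from_keras_model():
  global_layer = tf.keras.layers.Dense(10, input_shape=(784,))
  local_layer = tf.keras.layers.Dense(1)
  model = tf.keras.Sequential([global_layer, local_layer])
  input_spec = (
      tf.TensorSpec(shape=[None, 784], dtype=tf.float32),
      tf.TensorSpec(shape=[None, 1], dtype=tf.float32),
  )
  return from_keras_model(
      keras_model=model,
      global_layers=[global_layer],
      local_layers=[local_layer],
      input_spec=input_spec)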
class _KerasReconstructionModel(reconstruction_model.ReconstructionModel):
"""Internal wrapper class for `tf.keras.Model` objects.
Wraps uncompiled Keras models as `ReconstructionModel`s.
Tracks global and local layers of the model. Parameters contained in global
layers are sent to the server and aggregated across users normally, and
parameters contained in local layers are reconstructed at the beginning of
each round and not sent to the server. The loss function and metrics are
passed to a `tff.templates.IterativeProcess` wrapping this model and computed
there for both training and evaluation.
"""
def __init__(self, inner_model: tf.keras.Model,
global_layers: Iterable[tf.keras.layers.Layer],
local_layers: Iterable[tf.keras.layers.Layer],
input_spec: tff.Type):
self._keras_model = inner_model
self._global_layers = list(global_layers)
self._local_layers = list(local_layers)
self._input_spec = input_spec
# Ensure global_layers and local_layers include exactly the Keras model's
# trainable and non-trainable variables. Use hashable refs to uniquely
# compare variables, and track variable names for informative error
# messages.
global_and_local_variables = set()
for layer in self._global_layers + self._local_layers:
global_and_local_variables.update(
(var.ref(), var.name)
for var in layer.trainable_variables + layer.non_trainable_variables)
keras_variables = set((var.ref(), var.name)
for var in inner_model.trainable_variables +
inner_model.non_trainable_variables)
if global_and_local_variables != keras_variables:
# Use a symmetric set difference to compare the variables, since either
# set may include variables not present in the other.
variables_difference = global_and_local_variables ^ keras_variables
raise ValueError('Global and local layers must include all trainable '
'and non-trainable variables in the Keras model. '
'Difference: {d}, Global and local layers vars: {v}, '
'Keras vars: {k}'.format(
d=variables_difference,
v=global_and_local_variables,
k=keras_variables))
@property
def global_trainable_variables(self):
variables = []
for layer in self._global_layers:
variables.extend(layer.trainable_variables)
return variables
@property
def global_non_trainable_variables(self):
variables = []
for layer in self._global_layers:
variables.extend(layer.non_trainable_variables)
return variables
@property
def local_trainable_variables(self):
variables = []
for layer in self._local_layers:
variables.extend(layer.trainable_variables)
return variables
@property
def local_non_trainable_variables(self):
variables = []
for layer in self._local_layers:
variables.extend(layer.non_trainable_variables)
return variables
@property
def input_spec(self):
return self._input_spec
@tf.function
def forward_pass(self, batch_input, training=True):
if hasattr(batch_input, '_asdict'):
batch_input = batch_input._asdict()
if isinstance(batch_input, collections.abc.Mapping):
inputs = batch_input.get('x')
else:
inputs = batch_input[0]
if inputs is None:
raise KeyError('Received a batch_input that is missing required key `x`. '
'Instead have keys {}'.format(list(batch_input.keys())))
predictions = self._keras_model(inputs, training=training)
if isinstance(batch_input, collections.abc.Mapping):
y_true = batch_input.get('y')
else:
y_true = batch_input[1]
return reconstruction_model.BatchOutput(
predictions=predictions,
labels=y_true,
num_examples=tf.shape(tf.nest.flatten(inputs)[0])[0])
class MeanLossMetric(tf.keras.metrics.Mean):
"""A `tf.keras.metrics.Metric` wrapper for a loss function.
The loss function can be a `tf.keras.losses.Loss`, or it can be any callable
with the signature loss(y_true, y_pred).
Note that the dependence on a passed-in loss function may cause issues with
serialization of this metric.
"""
def __init__(self, loss_fn, name='loss', dtype=tf.float32):
super().__init__(name, dtype)
self._loss_fn = loss_fn
def update_state(self, y_true, y_pred, sample_weight=None):
batch_size = tf.cast(tf.shape(y_pred)[0], self._dtype)
y_true = tf.cast(y_true, self._dtype)
y_pred = tf.cast(y_pred, self._dtype)
batch_loss = self._loss_fn(y_true, y_pred)
return super().update_state(batch_loss, batch_size)
def get_config(self):
"""Used to recreate an instance of this class during aggregation."""
config = {'loss_fn': self._loss_fn}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def read_metric_variables(
metrics: List[tf.keras.metrics.Metric]) -> collections.OrderedDict:
"""Reads values from Keras metric variables."""
metric_variables = collections.OrderedDict()
for metric in metrics:
metric_variables[metric.name] = [v.read_value() for v in metric.variables]
return metric_variables
def federated_output_computation_from_metrics(
metrics: List[tf.keras.metrics.Metric]
) -> tff.federated_computation: # pytype: disable=invalid-annotation
"""Produces a federated computation for aggregating Keras metrics.
This can be used to evaluate both Keras and non-Keras models using Keras
metrics. Aggregates metrics across clients by summing their internal
variables, producing new metrics with summed internal variables, and calling
metric.result() on each. See `tff.learning.federated_aggregate_keras_metric`
for details.
Args:
metrics: A List of `tf.keras.metrics.Metric` to aggregate.
Returns:
A `tff.federated_computation` aggregating metrics across clients by summing
their internal variables, producing new metrics with summed internal
variables, and calling metric.result() on each.
"""
# Get a sample of metric variables to use to determine its type.
sample_metric_variables = read_metric_variables(metrics)
metric_variable_type_dict = tf.nest.map_structure(tf.TensorSpec.from_tensor,
sample_metric_variables)
federated_local_outputs_type = tff.type_at_clients(metric_variable_type_dict)
def federated_output(local_outputs):
return tff.learning.federated_aggregate_keras_metric(metrics, local_outputs)
federated_output_computation = tff.federated_computation(
federated_output, federated_local_outputs_type)
return federated_output_computation
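# Illustrative sketch (not part of the library API): wrap a Keras loss in
# MeanLossMetric, update it on a toy batch, and build the federated aggregation
# computation for it. The loss choice and batch values are arbitrary.
def _example_metric_aggregation():
  loss_metric = MeanLossMetric(tf.keras.losses.MeanSquaredError())
  loss_metric.update_state(y_true=[[1.0], [0.0]], y_pred=[[0.8], [0.1]])
  return federated_output_computation_from_metrics([loss_metric])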
|
|
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""State and behavior for translating between sync and async control flow."""
import threading
from grpc.framework.base import interfaces as base_interfaces
from grpc.framework.face import exceptions
from grpc.framework.face import interfaces
from grpc.framework.foundation import abandonment
from grpc.framework.foundation import stream
INTERNAL_ERROR_LOG_MESSAGE = ':-( RPC Framework (Face) Internal Error! :-('
_OPERATION_OUTCOME_TO_RPC_ABORTION = {
base_interfaces.Outcome.CANCELLED: interfaces.Abortion.CANCELLED,
base_interfaces.Outcome.EXPIRED: interfaces.Abortion.EXPIRED,
base_interfaces.Outcome.RECEPTION_FAILURE:
interfaces.Abortion.NETWORK_FAILURE,
base_interfaces.Outcome.TRANSMISSION_FAILURE:
interfaces.Abortion.NETWORK_FAILURE,
base_interfaces.Outcome.SERVICED_FAILURE:
interfaces.Abortion.SERVICED_FAILURE,
base_interfaces.Outcome.SERVICER_FAILURE:
interfaces.Abortion.SERVICER_FAILURE,
}
def _as_operation_termination_callback(rpc_abortion_callback):
def operation_termination_callback(operation_outcome):
rpc_abortion = _OPERATION_OUTCOME_TO_RPC_ABORTION.get(
operation_outcome, None)
if rpc_abortion is not None:
rpc_abortion_callback(rpc_abortion)
return operation_termination_callback
def _abortion_outcome_to_exception(abortion_outcome):
if abortion_outcome == base_interfaces.Outcome.CANCELLED:
return exceptions.CancellationError()
elif abortion_outcome == base_interfaces.Outcome.EXPIRED:
return exceptions.ExpirationError()
elif abortion_outcome == base_interfaces.Outcome.SERVICER_FAILURE:
return exceptions.ServicerError()
elif abortion_outcome == base_interfaces.Outcome.SERVICED_FAILURE:
return exceptions.ServicedError()
else:
return exceptions.NetworkError()
class UnaryConsumer(stream.Consumer):
"""A stream.Consumer that should only ever be passed one value."""
def __init__(self, on_termination):
self._on_termination = on_termination
self._value = None
def consume(self, value):
self._value = value
def terminate(self):
self._on_termination(self._value)
def consume_and_terminate(self, value):
self._on_termination(value)
class Rendezvous(stream.Consumer):
"""A rendez-vous with stream.Consumer and iterator interfaces."""
def __init__(self):
self._condition = threading.Condition()
self._values = []
self._values_completed = False
self._abortion = None
def consume(self, value):
with self._condition:
self._values.append(value)
self._condition.notify()
def terminate(self):
with self._condition:
self._values_completed = True
self._condition.notify()
def consume_and_terminate(self, value):
with self._condition:
self._values.append(value)
self._values_completed = True
self._condition.notify()
def __iter__(self):
return self
def __next__(self):
return self.next()
def next(self):
with self._condition:
while ((self._abortion is None) and
(not self._values) and
(not self._values_completed)):
self._condition.wait()
if self._abortion is not None:
raise _abortion_outcome_to_exception(self._abortion)
elif self._values:
return self._values.pop(0)
elif self._values_completed:
raise StopIteration()
else:
raise AssertionError('Unreachable code reached!')
def set_outcome(self, outcome):
with self._condition:
if outcome is not base_interfaces.Outcome.COMPLETED:
self._abortion = outcome
self._condition.notify()
class RpcContext(interfaces.RpcContext):
"""A wrapped base_interfaces.OperationContext."""
def __init__(self, operation_context):
self._operation_context = operation_context
def is_active(self):
return self._operation_context.is_active()
def time_remaining(self):
return self._operation_context.time_remaining()
def add_abortion_callback(self, abortion_callback):
self._operation_context.add_termination_callback(
_as_operation_termination_callback(abortion_callback))
def pipe_iterator_to_consumer(iterator, consumer, active, terminate):
"""Pipes values emitted from an iterator to a stream.Consumer.
Args:
iterator: An iterator from which values will be emitted.
consumer: A stream.Consumer to which values will be passed.
active: A no-argument callable that returns True if the work being done by
this function is still valid and should not be abandoned and False if the
work being done by this function should be abandoned.
terminate: A boolean indicating whether or not this function should
terminate the given consumer after passing to it all values emitted by the
given iterator.
Raises:
abandonment.Abandoned: If this function quits early after seeing False
returned by the active function passed to it.
Exception: This function raises whatever exceptions are raised by iterating
over the given iterator.
"""
for element in iterator:
if not active():
raise abandonment.Abandoned()
consumer.consume(element)
if not active():
raise abandonment.Abandoned()
if terminate:
consumer.terminate()
def abortion_outcome_to_exception(abortion_outcome):
return _abortion_outcome_to_exception(abortion_outcome)
def as_operation_termination_callback(rpc_abortion_callback):
return _as_operation_termination_callback(rpc_abortion_callback)
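# Illustrative sketch (not part of the framework API): buffer values from a
# plain Python iterator in a Rendezvous and read them back through its
# iterator interface. Single-threaded, so every value is buffered and the
# consumer is terminated before iteration starts.
def _example_pipe_through_rendezvous():
  rendezvous = Rendezvous()
  pipe_iterator_to_consumer(iter([1, 2, 3]), rendezvous, lambda: True, True)
  return list(rendezvous)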
|
|
# Copyright (c) 2011-2013 Andreas Sembrant
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sembrant
import os, sys, struct
from pyscarphase.proto import data_pb2 as data_pb
class DataReader:
'''
Read scarphase protobuf data file
<size of header>
<header>
<size of window 0>
<window 0>
<size of window 1>
<window 1>
...
'''
def __init__(self, filename, uuid=None):
self.messages = []
self.position = 0
self.eof = None
self.open(filename, uuid)
def __iter__(self):
return self
def __next__(self):
return self.next()
def open(self, filename, uuid=None):
self.file = open(filename, 'rb')
data = self.file.read(4)
if len(data) != 4:
raise EOFError()
size = struct.unpack('<i', data)[0]
data = self.file.read(size)
header = data_pb.Header()
header.ParseFromString(data)
if uuid and uuid != header.uuid:
raise Exception('UUID mismatch')
def read(self):
pass
def get(self, index):
# Change position
cur_index = self.tell()
self.seek(index)
# Get window
window = self.next()
# Restore
self.seek(cur_index)
#
return window
def __next(self, skip=True):
# Get message size
data = self.file.read(4)
# Check if end of file
if len(data) != 4:
self.eof = True
raise StopIteration()
# Add to message position list
if self.position == len(self.messages):
self.messages.append(self.file.tell() - 4)
#
self.position += 1
# Parse size
size = struct.unpack('<i', data)[0]
if skip:
self.file.seek(size, os.SEEK_CUR)
else:
# Read message
data = self.file.read(size)
# Parse message
window = data_pb.WindowData()
window.ParseFromString(data)
return window
def __read_all(self):
if not self.eof:
current_mpos, current_fpos = self.position, self.file.tell()
try:
# Go to current end
if len(self.messages) != 0:
current_end = len(self.messages) - 1
self.file.seek(self.messages[current_end])
# Find end
while True:
self.__next(skip=True)
except StopIteration:
self.position = current_mpos
self.file.seek(current_fpos)
def next(self):
return self.__next(skip=False)
def seek(self, position, whence=os.SEEK_SET):
if self.position == position:
return
if whence == os.SEEK_SET:
pass
elif whence == os.SEEK_CUR:
self.seek(self.position + position)
return
elif whence == os.SEEK_END:
if position > 0:
raise IndexError()
self.__read_all()
self.seek(len(self.messages) - 1 + position)
return
else:
pass
# If we know the offset already
if position < len(self.messages):
self.position = position
self.file.seek(self.messages[self.position])
# iterate through messages until we reach right offset
else:
if len(self.messages) > 0:
self.position = len(self.messages) - 1
self.file.seek(self.messages[self.position])
position = position - self.position
try:
while position > 0:
self.__next(skip=True)
position -= 1
except StopIteration:
raise IndexError()
def tell(self):
return self.position
class DataWriter:
'''
    Write scarphase protobuf data file
<size of header>
<header>
<size of window 0>
<window 0>
<size of window 1>
<window 1>
...
'''
def __init__(self, filename, uuid=None):
self.open(filename, uuid)
def open(self, filename, uuid=None):
self.file = open(filename, 'wb')
self.uuid = uuid
header = data_pb.Header()
header.uuid = uuid
data = header.SerializeToString()
self.file.write(struct.pack('<i', len(data)))
self.file.write(data)
self.file.flush()
def write(self, window):
data = window.SerializeToString()
self.file.write(struct.pack('<i', len(data)))
self.file.write(data)
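# Illustrative sketch (not part of the module): iterate over every window in a
# trace file. The file name is hypothetical and the file must have been written
# by DataWriter (or the scarphase tools).
def _example_read_all_windows(filename='trace.pb'):
    reader = DataReader(filename)
    for window in reader:
        # Each item is a parsed data_pb.WindowData message.
        print(window)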
|
|
# Copyright 2017 Lajos Gerecs, Janos Czentye
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import httplib
import logging
import os
import time
import urlparse
from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from threading import Thread, Event
import requests
from testframework.testcases.basic import (EscapeTestCase,
BasicSuccessfulTestCase)
log = logging.getLogger()
class CallbackHandler(BaseHTTPRequestHandler):
"""
Handler class to handle received callback request.
"""
RESULT_PARAM_NAME = "response-code"
def do_POST (self):
self.__process_request()
self.server.callback_event.set()
self.send_response(200)
self.send_header('Connection', 'close')
self.end_headers()
def do_GET (self):
self.do_POST()
def log_message (self, format, *args):
"""
Disable logging of incoming messages.
:param format: message format
:type format: str
:return: None
"""
log.debug("%s - - Received callback [%s] %s\n" %
(self.__class__.__name__,
self.log_date_time_string(),
format % args))
def __process_request (self):
result_code = self.__get_request_params().get(self.RESULT_PARAM_NAME, None)
self.server._result = result_code
def __get_request_params (self):
"""
Examine callback request params and header field to construct a parameter
dict.
:return: parameters of the callback call
:rtype: dict
"""
params = {}
query = urlparse.urlparse(self.path).query
if query:
query = query.split('&')
for param in query:
if '=' in param:
name, value = param.split('=', 1)
params[name] = value
else:
params[param] = True
# Check message-id in headers as backup
if 'message-id' not in params:
if 'message-id' in self.headers:
params['message-id'] = self.headers['message-id']
return params
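# Illustrative sketch (not part of the framework): mimics how CallbackHandler
# turns the query string of an incoming callback into a parameter dict, e.g.
# {'message-id': '123', 'response-code': '200'}. The example path is
# hypothetical.
def _example_parse_callback_query(path='/callback?message-id=123&response-code=200'):
  params = {}
  for param in urlparse.urlparse(path).query.split('&'):
    if '=' in param:
      name, value = param.split('=', 1)
      params[name] = value
    else:
      params[param] = True
  return params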
class CallbackManager(HTTPServer, Thread):
"""
  Manage callbacks initiated from the tested ESCAPE process.
  Initiates an HTTP server on a separate thread and accepts every received
  callback call as successful.
  Can wait for a callback in a blocking manner.
"""
DEFAULT_SERVER_ADDRESS = "localhost"
DEFAULT_PORT = 12345
DEFAULT_WAIT_TIMEOUT = 30
def __init__ (self, address=DEFAULT_SERVER_ADDRESS, port=DEFAULT_PORT,
wait_timeout=DEFAULT_WAIT_TIMEOUT):
Thread.__init__(self, name="%s(%s:%s)" % (self.__class__.__name__,
address, port))
HTTPServer.__init__(self, (address, port), CallbackHandler)
self.daemon = True
self.callback_event = Event()
self.wait_timeout = wait_timeout
self._result = None
log.debug("\nInit %s(listen: %s:%s, wait_timeout: %s)" % (
self.__class__.__name__, self.server_name, self.server_port,
self.wait_timeout))
@property
def url (self):
return "http://%s:%s/callback" % self.server_address
@property
def last_result (self):
return self._result
def run (self):
"""
Entry point of the worker thread.
:return: None
"""
log.debug("Start %s to wait callbacks..." % self.__class__.__name__)
try:
self.serve_forever()
except KeyboardInterrupt:
raise
except Exception as e:
log.error("Got exception in %s: %s" % (self.__class__.__name__, e))
finally:
self.server_close()
log.debug("%s is stopped!" % self.__class__.__name__)
def wait_for_callback (self):
"""
Block-wait for the next callback with timeout.
:return: callback is received with OK result
:rtype: bool
"""
# Always use a timeout value because without timeout wait() func is not
    # interruptible by KeyboardInterrupt
self.callback_event.wait(timeout=self.wait_timeout)
self.callback_event.clear()
return str(self.last_result) == str(httplib.OK)
def __str__ (self):
return "%s(address: %s, timeout: %s)" % (self.__class__.__name__,
self.server_address,
self.wait_timeout)
def shutdown (self):
log.debug("Shutdown %s..." % self)
super(CallbackManager, self).shutdown()
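# Illustrative usage sketch (not an actual test case): start a CallbackManager,
# hand its URL to the component under test, then block until one callback
# arrives. Port and timeout values here are arbitrary.
def _example_wait_for_single_callback():
  manager = CallbackManager(port=12346, wait_timeout=10)
  manager.start()
  try:
    log.debug("Callback URL to register: %s" % manager.url)
    return manager.wait_for_callback()
  finally:
    manager.shutdown()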
# noinspection PyAbstractClass
class RESTBasedServiceMixIn(EscapeTestCase):
"""
  Initiate ESCAPE on a separate thread and feed it with the request(s)
  through one of its REST APIs.
"""
REQUEST_DELAY = 3
REQUEST_TIMEOUT = 1.0
REQUEST_SUCCESS_CODE = httplib.ACCEPTED
REQUEST_PREFIX = "request"
DEFAULT_URL = "http://localhost:8008/escape"
RPC_REQUEST_NFFG = "sg"
RPC_REQUEST_VIRTUALIZER = "edit-config"
def __init__ (self, url=None, delay=None, callback=False, **kwargs):
super(RESTBasedServiceMixIn, self).__init__(**kwargs)
self.url = url if url else self.DEFAULT_URL
self.delay = delay if delay is not None else self.REQUEST_DELAY
self.callback = callback
self._suppress_requests_logging()
@staticmethod
def _suppress_requests_logging (level=None):
import logging
if level is not None:
level = level
elif log.getEffectiveLevel() < logging.DEBUG:
level = log.getEffectiveLevel()
else:
level = logging.WARNING
logging.getLogger("requests").setLevel(level)
logging.getLogger("urllib3").setLevel(level)
def runTest (self):
log.debug("\nSTART test")
if self.command_runner.kill_timeout:
timeout = self.command_runner.kill_timeout + 1.0
else:
timeout = None
try:
# Init ESCAPE process in separate thread to send request through its
# REST API and be able to wait for the result
thread = Thread(target=self.run_escape)
thread.daemon = True
thread.start()
if self.callback:
self.send_requests_with_callback(shutdown=True if timeout else False)
else:
self.send_requests_with_delay(shutdown=True if timeout else False)
if timeout:
thread.join(timeout=timeout)
else:
while thread.isAlive():
thread.join(timeout=1.0)
except KeyboardInterrupt:
log.error("\nReceived KeyboardInterrupt! Abort running thread...")
self.command_runner.kill_process()
raise
if thread.isAlive():
log.error("ESCAPE process is still alive!")
self.command_runner.kill_process()
raise RuntimeError("ESCAPE's runner thread has got TIMEOUT!")
# Verify result here because logging in file is slow compared to the
# testframework
log.debug("\nSTOP test")
self.verify_result()
# Mark test case as success
self.success = True
def send_requests_with_delay (self, shutdown=True):
"""
    Send all requests starting with a prefix in the test case folder to the
REST API of ESCAPE.
:return: None
"""
testcase_dir = self.test_case_info.testcase_dir_name
reqs = sorted([os.path.join(testcase_dir, file_name)
for file_name in os.listdir(testcase_dir)
if file_name.startswith(self.REQUEST_PREFIX)])
log.debug("Sending requests with explicit backoff time: %s..." % self.delay)
try:
for request_file in reqs:
# Wait for ESCAPE coming up, flushing to file - no callback yet
time.sleep(self.delay)
with open(request_file) as f:
ext = request_file.rsplit('.', 1)[-1]
ret = self._send_request(data=f.read(), ext=ext)
self.assertTrue(ret, msg="Got error while sending request: %s"
% request_file)
# Wait for last orchestration step before stop ESCAPE
time.sleep(self.delay)
finally:
if shutdown:
self.command_runner.stop()
def send_requests_with_callback (self, shutdown=True):
"""
    Send all requests starting with a prefix in the test case folder to the
REST API of ESCAPE.
:return: None
"""
testcase_dir = self.test_case_info.testcase_dir_name
reqs = sorted([os.path.join(testcase_dir, file_name)
for file_name in os.listdir(testcase_dir)
if file_name.startswith(self.REQUEST_PREFIX)])
cbmanager = CallbackManager(wait_timeout=self.command_runner.kill_timeout)
cbmanager.start()
self.command_runner.wait_for_ready()
cb_url = cbmanager.url if self.callback else None
log.debug("Sending requests and waiting for callbacks...")
try:
for request in reqs:
with open(request) as f:
ext = request.rsplit('.', 1)[-1]
ret = self._send_request(data=f.read(), ext=ext, callback_url=cb_url)
self.assertTrue(ret,
msg="Got error while sending request: %s" % request)
success = cbmanager.wait_for_callback()
self.assertIsNotNone(cbmanager.last_result,
msg="Service deploy error detected! "
"No callback received!")
self.assertTrue(success, msg="Service deploy error detected! "
"Callback returned with error: %s" %
cbmanager.last_result)
finally:
if shutdown:
cbmanager.shutdown()
self.command_runner.stop()
def _send_request (self, data, ext, callback_url=None):
"""
Send one request read from file to the configured URL.
:param data: raw request data
:type data: str
:param ext: file extension to define request format
:type ext: basestring
:return: request sending was successful or not
:rtype: bool
"""
url = self.url
headers = dict()
if ext.upper() == 'XML':
headers['Content-Type'] = "application/xml"
url = urlparse.urljoin(url, self.RPC_REQUEST_VIRTUALIZER)
elif ext.upper() == 'NFFG':
headers['Content-Type'] = "application/json"
url = urlparse.urljoin(url, self.RPC_REQUEST_NFFG)
params = {"call-back": callback_url} if callback_url else {}
try:
ret = requests.post(url=url, data=data, headers=headers, params=params,
timeout=self.REQUEST_TIMEOUT)
return True if ret.status_code == self.REQUEST_SUCCESS_CODE else False
except requests.RequestException as e:
log.error("FAIL\nFailed to send request to ESCAPE: %s" % e.message)
return False
class RESTBasedSuccessfulTestCase(BasicSuccessfulTestCase,
RESTBasedServiceMixIn):
"""
Dedicated Testcase class for basic successful testing and iterated request
feeding.
"""
def __init__ (self, **kwargs):
super(RESTBasedSuccessfulTestCase, self).__init__(**kwargs)
class DoVAPISuccessfulTestCase(RESTBasedSuccessfulTestCase):
def setUp (self):
super(DoVAPISuccessfulTestCase, self).setUp()
self.ACCEPTABLE_WARNINGS.append(
"Received direct DoV rewrite request from external component without "
"any preliminary deploy request!")
|
|
'''
Copyright (C) 2014 Parrot SA
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
* Neither the name of Parrot nor the names
of its contributors may be used to endorse or promote products
derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
'''
import sys
import os
import re
import arsdkparser
MYDIR=os.path.abspath(os.path.dirname(__file__))
PACKAGES_DIR=os.path.realpath(os.path.join(MYDIR, "../.."))
sys.path.append('%(PACKAGES_DIR)s/ARSDKBuildUtils/Utils/Python' % locals())
sys.path.append('%(PACKAGES_DIR)s/libARCommands/Tools' % locals())
from ARFuncs import *
from libARCommandsgen import *
from ARControllerUtils import *
from arsdkparser import *
DEVICE_CONTROLLER_FILE_NAME = 'deviceControllers.xml'
DEVICE_CONTROLLER_FILE = PACKAGES_DIR+'/libARController/Xml/'+DEVICE_CONTROLLER_FILE_NAME
CTRL_DICT_KEY_H_NAME = 'ARCONTROLLER_DICTIONARY_Key.h'
CTRL_DICT_KEY_C_NAME = 'ARCONTROLLER_DICTIONARY_Key.c'
def generateDictionaryKeyEnum (ctx, SRC_DIR, INC_DIR):
deviceControllers = parseDeviceControllersXml (DEVICE_CONTROLLER_FILE, ctx)
#check deviceController list
if not deviceControllers:
exit (1)
ARPrint ('generateDictionaryKeyEnum ...')
#########################################
# Write Feature controller header file #
#########################################
includeDefine = '_' + MODULE_DICTIONARY + '_KEY_H_'
bref = '.h'
headerFileName = CTRL_DICT_KEY_H_NAME
filepath = INC_DIR + headerFileName
hFile = open (filepath, 'w')
hFile.write ('/**********************************************************\n')
hFile.write (' * AUTOGENERATED FILE *\n')
hFile.write (' * DO NOT MODIFY IT *\n')
hFile.write (' * *\n')
hFile.write (' * To add new commands : *\n')
hFile.write (' * - Modify ../Xml/commands.xml file *\n')
hFile.write (' * - Re-run generateDictionaryKeyEnum.py script *\n')
hFile.write (' * *\n')
hFile.write (' **********************************************************/\n')
hFile.write ('\n')
hFile.write ('/**\n')
hFile.write ('* @file '+headerFileName+'\n')
hFile.write ('* @brief '+bref+'\n')
hFile.write ('*/\n')
hFile.write ('\n')
hFile.write ('#ifndef '+includeDefine+'\n')
hFile.write ('#define '+includeDefine+'\n')
hFile.write ('\n')
hFile.write ('/**\n')
hFile.write (' * \n') # TODO add !!!!!!!!!!!!!!!!!!!!!!!!!!
hFile.write (' */\n')
hFile.write ('typedef enum \n')
hFile.write ('{\n')
first = True
for feature in ctx.features:
if first:
hFile.write (' '+defineNotification(feature)+' = 0, /**< Key used to define the feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
first = False
else:
hFile.write (' '+defineNotification(feature)+', /**< Key used to define the feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
for evt in feature.evts:
hFile.write (' '+defineNotification(feature, evt)+', /**< Key used to define the event <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
hFile.write (' '+AREnumValue(MODULE_DICTIONARY, 'DICTIONARY', 'KEY','MAX')+', /**< Unused, iterator maximum value */\n')
hFile.write ('}'+defineNotificationDef()+';\n')
hFile.write ('\n')
# TODO add !!!!!!!!!!!!!!!!!!!!!!!!!!
hFile.write (''+defineNotificationDef()+' ' + ARFunctionName (MODULE_DICTIONARY, 'Key', 'GetFeatureFromCommandKey')+' ('+defineNotificationDef()+' commandKey);\n')
hFile.write ('#endif /* '+includeDefine+' */\n')
hFile.write ('\n')
hFile.write ('// END GENERATED CODE\n')
hFile.close ()
#################################################
# Write Feature controller c file #
#################################################
classTag = 'ARCONTROLLER_Device'
cFileName = CTRL_DICT_KEY_C_NAME
filepath = SRC_DIR + cFileName
cFile = open (filepath, 'w')
cFile.write ('/**********************************************************\n')
cFile.write (' * AUTOGENERATED FILE *\n')
cFile.write (' * DO NOT MODIFY IT *\n')
cFile.write (' * *\n')
cFile.write (' * To add new commands : *\n')
cFile.write (' * - Modify ../Xml/commands.xml file *\n')
cFile.write (' * - Re-run generateDictionaryKeyEnum.py script *\n')
cFile.write (' * *\n')
cFile.write (' **********************************************************/\n')
cFile.write ('\n')
cFile.write ('/**\n')
cFile.write ('* @file '+cFileName+'\n')
cFile.write ('* @brief '+bref+'\n')
cFile.write ('*/\n')
cFile.write ('\n')
cFile.write ('#include <stdio.h>\n')
cFile.write ('\n')
cFile.write ('#include <libARController/ARCONTROLLER_DICTIONARY_Key.h>\n')
cFile.write ('\n')
cFile.write (''+defineNotificationDef()+' ' + ARFunctionName (MODULE_DICTIONARY, 'Key', 'GetFeatureFromCommandKey')+' ('+defineNotificationDef()+' commandKey)\n')
cFile.write ('{\n')
cFile.write (' // -- Get Feature From Command Key --\n')
cFile.write (' \n')
cFile.write (' '+defineNotificationDef()+' featrueKey = '+AREnumValue(MODULE_DICTIONARY, 'DICTIONARY', 'KEY','MAX')+';\n')
cFile.write (' \n')
cFile.write (' // find feature parameters\n')
first = True
for index in range(len(ctx.features)-1):
feature = ctx.features[index]
featureNext = ctx.features[index+1]
ifOrElse = 'if'
if first:
ifOrElse = 'if'
first = False
else:
ifOrElse = 'else if'
nextKey = ''
if index != (len(ctx.features)-1):
nextKey = defineNotification(featureNext)
else:
nextKey = AREnumValue(MODULE_DICTIONARY, 'DICTIONARY', 'KEY','MAX')
cFile.write (' '+ifOrElse+' ('+defineNotification(feature)+' <= commandKey && commandKey < '+nextKey+')\n')
cFile.write (' {\n')
cFile.write (' featrueKey = '+defineNotification(feature)+';\n')
cFile.write (' }\n')
cFile.write (' \n')
cFile.write (' return featrueKey;\n')
cFile.write ('}\n')
cFile.write ('\n')
cFile.close ()
def generateDictionaryKeyEnumJava (ctx, JNI_JAVA_DIR):
CLASS_NAME = ARJavaEnumType (MODULE_ARCONTROLLER, 'DICTIONARY', 'Key')
JFILE_NAME = JNI_JAVA_DIR + CLASS_NAME + '.java'
UNKNOWN_VALUE = 'e'+ARJavaEnumValDef(MODULE_ARCONTROLLER, 'DICTIONARY', 'Key', 'UNKNOWN_ENUM_VALUE', True)
jfile = open(JFILE_NAME, 'w')
jfile.write(LICENCE_HEADER)
jfile.write('\n')
jfile.write ('package com.parrot.arsdk.'+MODULE_ARCONTROLLER.lower()+';\n')
jfile.write('\n')
jfile.write('import java.util.HashMap;\n')
jfile.write('\n')
jfile.write('/**\n')
jfile.write(' * Java copy of the ' + AREnumName (MODULE_ARCONTROLLER, 'DICTIONARY', 'Key') + ' enum\n')
jfile.write(' */\n')
jfile.write('public enum ' + CLASS_NAME + ' {\n')
jfile.write(' /** Dummy value for all unknown cases */\n')
jfile.write(' ' + UNKNOWN_VALUE + ' (Integer.MIN_VALUE, "Dummy value for all unknown cases"),\n')
val = 0
for feature in ctx.features:
jfile.write(' /** Key used to define the feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code> */\n')
jfile.write(' '+defineNotification(feature)+ ' (' + str(val)+ ', "Key used to define the feature <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>"),\n')
val += 1
for evt in feature.evts:
jfile.write(' /** Key used to define the event <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>*/\n')
jfile.write(' '+defineNotification(feature, evt)+' (' + str(val)+ ', "Key used to define the event <code>' + ARCapitalize (format_cmd_name(evt)) + '</code> in project <code>' + ARCapitalize (get_ftr_old_name(feature)) + '</code>"),\n')
val += 1
jfile.write(' /** Unused, iterator maximum value */\n')
jfile.write(' ARCONTROLLER_DICTIONARY_DICTIONARY_KEY_MAX (' + str(val)+ ', "Unused, iterator maximum value");\n')
jfile.write('\n')
jfile.write(' private final int value;\n')
jfile.write(' private final String comment;\n');
jfile.write(' static HashMap<Integer, ' + CLASS_NAME + '> valuesList;\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = null;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' ' + CLASS_NAME + ' (int value, String comment) {\n')
jfile.write(' this.value = value;\n')
jfile.write(' this.comment = comment;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the int value of the enum\n')
jfile.write(' * @return int value of the enum\n')
jfile.write(' */\n')
jfile.write(' public int getValue () {\n')
jfile.write(' return value;\n')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Gets the ' + CLASS_NAME + ' instance from a C enum value\n')
jfile.write(' * @param value C value of the enum\n')
jfile.write(' * @return The ' + CLASS_NAME + ' instance, or null if the C enum value was not valid\n')
jfile.write(' */\n')
jfile.write(' public static ' + CLASS_NAME + ' getFromValue (int value) {\n')
jfile.write(' if (null == valuesList) {\n')
jfile.write(' ' + CLASS_NAME + ' [] valuesArray = ' + CLASS_NAME + '.values ();\n')
jfile.write(' valuesList = new HashMap<Integer, ' + CLASS_NAME + '> (valuesArray.length);\n')
jfile.write(' for (' + CLASS_NAME + ' entry : valuesArray) {\n')
jfile.write(' valuesList.put (entry.getValue (), entry);\n')
jfile.write(' }\n')
jfile.write(' }\n')
jfile.write(' ' + CLASS_NAME + ' retVal = valuesList.get (value);\n')
jfile.write(' if (retVal == null) {\n')
jfile.write(' retVal = ' + UNKNOWN_VALUE + ';\n')
jfile.write(' }\n')
jfile.write(' return retVal;')
jfile.write(' }\n')
jfile.write('\n')
jfile.write(' /**\n')
jfile.write(' * Returns the enum comment as a description string\n')
jfile.write(' * @return The enum description\n')
jfile.write(' */\n')
jfile.write(' public String toString () {\n')
jfile.write(' if (this.comment != null) {\n')
jfile.write(' return this.comment;\n')
jfile.write(' }\n')
jfile.write(' return super.toString ();\n')
jfile.write(' }\n')
jfile.write('}\n')
jfile.close()
def list_files_dict_key (ctx, SRC_DIR, INC_DIR):
''' Print device dictionary key generated files '''
print(INC_DIR + CTRL_DICT_KEY_H_NAME)
print(SRC_DIR + CTRL_DICT_KEY_C_NAME)
def list_files_dict_key_java (ctx, JNI_JAVA_DIR):
''' Print device dictionary key generated files '''
CLASS_NAME = ARJavaEnumType (MODULE_ARCONTROLLER, 'DICTIONARY', 'Key')
JFILE_NAME = JNI_JAVA_DIR + CLASS_NAME + '.java'
print(JFILE_NAME)
|
|
# Copyright (c) 2017-2019 Uber Technologies, Inc.
# SPDX-License-Identifier: Apache-2.0
import math
import torch
from torch.distributions.utils import lazy_property
from torch.nn.functional import pad
from pyro.distributions.util import broadcast_shape
from pyro.ops.tensor_utils import cholesky, matmul, matvecmul, triangular_solve
class Gaussian:
"""
Non-normalized Gaussian distribution.
This represents an arbitrary semidefinite quadratic function, which can be
interpreted as a rank-deficient scaled Gaussian distribution. The precision
matrix may have zero eigenvalues, thus it may be impossible to work
directly with the covariance matrix.
:param torch.Tensor log_normalizer: a normalization constant, which is mainly used to keep
track of normalization terms during contractions.
:param torch.Tensor info_vec: information vector, which is a scaled version of the mean
        ``info_vec = precision @ mean``. We use this representation to make Gaussian contraction
fast and stable.
:param torch.Tensor precision: precision matrix of this gaussian.
"""
def __init__(self, log_normalizer, info_vec, precision):
# NB: using info_vec instead of mean to deal with rank-deficient problem
assert info_vec.dim() >= 1
assert precision.dim() >= 2
assert precision.shape[-2:] == info_vec.shape[-1:] * 2
self.log_normalizer = log_normalizer
self.info_vec = info_vec
self.precision = precision
def dim(self):
return self.info_vec.size(-1)
@lazy_property
def batch_shape(self):
return broadcast_shape(
self.log_normalizer.shape,
self.info_vec.shape[:-1],
self.precision.shape[:-2],
)
def expand(self, batch_shape):
n = self.dim()
log_normalizer = self.log_normalizer.expand(batch_shape)
info_vec = self.info_vec.expand(batch_shape + (n,))
precision = self.precision.expand(batch_shape + (n, n))
return Gaussian(log_normalizer, info_vec, precision)
def reshape(self, batch_shape):
n = self.dim()
log_normalizer = self.log_normalizer.reshape(batch_shape)
info_vec = self.info_vec.reshape(batch_shape + (n,))
precision = self.precision.reshape(batch_shape + (n, n))
return Gaussian(log_normalizer, info_vec, precision)
def __getitem__(self, index):
"""
Index into the batch_shape of a Gaussian.
"""
assert isinstance(index, tuple)
log_normalizer = self.log_normalizer[index]
info_vec = self.info_vec[index + (slice(None),)]
precision = self.precision[index + (slice(None), slice(None))]
return Gaussian(log_normalizer, info_vec, precision)
@staticmethod
def cat(parts, dim=0):
"""
Concatenate a list of Gaussians along a given batch dimension.
"""
if dim < 0:
dim += len(parts[0].batch_shape)
args = [
torch.cat([getattr(g, attr) for g in parts], dim=dim)
for attr in ["log_normalizer", "info_vec", "precision"]
]
return Gaussian(*args)
def event_pad(self, left=0, right=0):
"""
Pad along event dimension.
"""
lr = (left, right)
log_normalizer = self.log_normalizer
info_vec = pad(self.info_vec, lr)
precision = pad(self.precision, lr + lr)
return Gaussian(log_normalizer, info_vec, precision)
def event_permute(self, perm):
"""
Permute along event dimension.
"""
assert isinstance(perm, torch.Tensor)
assert perm.shape == (self.dim(),)
info_vec = self.info_vec[..., perm]
precision = self.precision[..., perm][..., perm, :]
return Gaussian(self.log_normalizer, info_vec, precision)
def __add__(self, other):
"""
Adds two Gaussians in log-density space.
"""
if isinstance(other, Gaussian):
assert self.dim() == other.dim()
return Gaussian(
self.log_normalizer + other.log_normalizer,
self.info_vec + other.info_vec,
self.precision + other.precision,
)
if isinstance(other, (int, float, torch.Tensor)):
return Gaussian(self.log_normalizer + other, self.info_vec, self.precision)
raise ValueError("Unsupported type: {}".format(type(other)))
def __sub__(self, other):
if isinstance(other, (int, float, torch.Tensor)):
return Gaussian(self.log_normalizer - other, self.info_vec, self.precision)
raise ValueError("Unsupported type: {}".format(type(other)))
def log_density(self, value):
"""
Evaluate the log density of this Gaussian at a point value::
-0.5 * value.T @ precision @ value + value.T @ info_vec + log_normalizer
This is mainly used for testing.
"""
if value.size(-1) == 0:
batch_shape = broadcast_shape(value.shape[:-1], self.batch_shape)
return self.log_normalizer.expand(batch_shape)
result = (-0.5) * matvecmul(self.precision, value)
result = result + self.info_vec
result = (value * result).sum(-1)
return result + self.log_normalizer
def rsample(self, sample_shape=torch.Size()):
"""
Reparameterized sampler.
"""
P_chol = cholesky(self.precision)
loc = self.info_vec.unsqueeze(-1).cholesky_solve(P_chol).squeeze(-1)
shape = sample_shape + self.batch_shape + (self.dim(), 1)
noise = torch.randn(shape, dtype=loc.dtype, device=loc.device)
noise = triangular_solve(noise, P_chol, upper=False, transpose=True).squeeze(-1)
return loc + noise
def condition(self, value):
"""
Condition this Gaussian on a trailing subset of its state.
This should satisfy::
g.condition(y).dim() == g.dim() - y.size(-1)
Note that since this is a non-normalized Gaussian, we include the
density of ``y`` in the result. Thus :meth:`condition` is similar to a
``functools.partial`` binding of arguments::
left = x[..., :n]
right = x[..., n:]
g.log_density(x) == g.condition(right).log_density(left)
"""
assert isinstance(value, torch.Tensor)
right = value.size(-1)
dim = self.dim()
assert right <= dim
n = dim - right
info_a = self.info_vec[..., :n]
info_b = self.info_vec[..., n:]
P_aa = self.precision[..., :n, :n]
P_ab = self.precision[..., :n, n:]
P_bb = self.precision[..., n:, n:]
b = value
info_vec = info_a - matvecmul(P_ab, b)
precision = P_aa
log_normalizer = (
self.log_normalizer
+ -0.5 * matvecmul(P_bb, b).mul(b).sum(-1)
+ b.mul(info_b).sum(-1)
)
return Gaussian(log_normalizer, info_vec, precision)
def left_condition(self, value):
"""
Condition this Gaussian on a leading subset of its state.
This should satisfy::
            g.left_condition(y).dim() == g.dim() - y.size(-1)
        Note that since this is a non-normalized Gaussian, we include the
        density of ``y`` in the result. Thus :meth:`left_condition` is similar to a
``functools.partial`` binding of arguments::
left = x[..., :n]
right = x[..., n:]
g.log_density(x) == g.left_condition(left).log_density(right)
"""
assert isinstance(value, torch.Tensor)
left = value.size(-1)
dim = self.dim()
assert left <= dim
perm = torch.cat(
[
torch.arange(left, dim, device=value.device),
torch.arange(left, device=value.device),
]
)
return self.event_permute(perm).condition(value)
def marginalize(self, left=0, right=0):
"""
        Marginalize out variables on either side of the event dimension::
            g.marginalize(left=n).event_logsumexp() = g.event_logsumexp()
            g.marginalize(right=n).event_logsumexp() = g.event_logsumexp()
and for data ``x``:
g.condition(x).event_logsumexp()
= g.marginalize(left=g.dim() - x.size(-1)).log_density(x)
"""
if left == 0 and right == 0:
return self
if left > 0 and right > 0:
raise NotImplementedError
n = self.dim()
n_b = left + right
a = slice(left, n - right) # preserved
b = slice(None, left) if left else slice(n - right, None)
P_aa = self.precision[..., a, a]
P_ba = self.precision[..., b, a]
P_bb = self.precision[..., b, b]
P_b = cholesky(P_bb)
P_a = triangular_solve(P_ba, P_b, upper=False)
P_at = P_a.transpose(-1, -2)
precision = P_aa - matmul(P_at, P_a)
info_a = self.info_vec[..., a]
info_b = self.info_vec[..., b]
b_tmp = triangular_solve(info_b.unsqueeze(-1), P_b, upper=False)
info_vec = info_a - matmul(P_at, b_tmp).squeeze(-1)
log_normalizer = (
self.log_normalizer
+ 0.5 * n_b * math.log(2 * math.pi)
- P_b.diagonal(dim1=-2, dim2=-1).log().sum(-1)
+ 0.5 * b_tmp.squeeze(-1).pow(2).sum(-1)
)
return Gaussian(log_normalizer, info_vec, precision)
def event_logsumexp(self):
"""
Integrates out all latent state (i.e. operating on event dimensions).
"""
n = self.dim()
chol_P = cholesky(self.precision)
chol_P_u = triangular_solve(
self.info_vec.unsqueeze(-1), chol_P, upper=False
).squeeze(-1)
u_P_u = chol_P_u.pow(2).sum(-1)
return (
self.log_normalizer
+ 0.5 * n * math.log(2 * math.pi)
+ 0.5 * u_P_u
- chol_P.diagonal(dim1=-2, dim2=-1).log().sum(-1)
)
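# Illustrative sketch (editor's addition, not part of the original module): a quick
# numerical check of the information-form parameterization. For a full-rank
# MultivariateNormal with precision P and mean m, the Gaussian built from
# info_vec = P @ m via mvn_to_gaussian() (defined further below in this module)
# reproduces mvn.log_prob().
def _example_gaussian_log_density():
    mvn = torch.distributions.MultivariateNormal(
        loc=torch.randn(3), covariance_matrix=torch.eye(3) * 0.5
    )
    g = mvn_to_gaussian(mvn)  # resolved at call time, so the later definition is fine
    x = torch.randn(10, 3)
    assert torch.allclose(g.log_density(x), mvn.log_prob(x), atol=1e-5)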
class AffineNormal:
"""
Represents a conditional diagonal normal distribution over a random
variable ``Y`` whose mean is an affine function of a random variable ``X``.
The likelihood of ``X`` is thus::
AffineNormal(matrix, loc, scale).condition(y).log_density(x)
which is equivalent to::
Normal(x @ matrix + loc, scale).to_event(1).log_prob(y)
:param torch.Tensor matrix: A transformation from ``X`` to ``Y``.
Should have rightmost shape ``(x_dim, y_dim)``.
:param torch.Tensor loc: A constant offset for ``Y``'s mean.
Should have rightmost shape ``(y_dim,)``.
:param torch.Tensor scale: Standard deviation for ``Y``.
Should have rightmost shape ``(y_dim,)``.
"""
def __init__(self, matrix, loc, scale):
assert loc.shape == scale.shape
assert matrix.shape[:-2] == loc.shape[:-1]
assert matrix.size(-1) == loc.size(-1)
self.matrix = matrix
self.loc = loc
self.scale = scale
self._gaussian = None
@lazy_property
def batch_shape(self):
return self.matrix.shape[:-2]
def condition(self, value):
if value.size(-1) == self.loc.size(-1):
prec_sqrt = self.matrix / self.scale.unsqueeze(-2)
precision = matmul(prec_sqrt, prec_sqrt.transpose(-1, -2))
delta = (value - self.loc) / self.scale
info_vec = matvecmul(prec_sqrt, delta)
log_normalizer = (
-0.5 * self.loc.size(-1) * math.log(2 * math.pi)
- 0.5 * delta.pow(2).sum(-1)
- self.scale.log().sum(-1)
)
return Gaussian(log_normalizer, info_vec, precision)
else:
return self.to_gaussian().condition(value)
def left_condition(self, value):
"""
If ``value.size(-1) == x_dim``, this returns a Normal distribution with
``event_dim=1``. After applying this method, the cost to draw a sample is
``O(y_dim)`` instead of ``O(y_dim ** 3)``.
"""
if value.size(-1) == self.matrix.size(-2):
loc = matvecmul(self.matrix.transpose(-1, -2), value) + self.loc
matrix = value.new_zeros(loc.shape[:-1] + (0, loc.size(-1)))
scale = self.scale.expand(loc.shape)
return AffineNormal(matrix, loc, scale)
else:
return self.to_gaussian().left_condition(value)
def rsample(self, sample_shape=torch.Size()):
"""
Reparameterized sampler.
"""
if self.matrix.size(-2) > 0:
raise NotImplementedError
shape = sample_shape + self.batch_shape + self.loc.shape[-1:]
noise = torch.randn(shape, dtype=self.loc.dtype, device=self.loc.device)
return self.loc + noise * self.scale
def to_gaussian(self):
if self._gaussian is None:
mvn = torch.distributions.Independent(
torch.distributions.Normal(self.loc, scale=self.scale), 1
)
y_gaussian = mvn_to_gaussian(mvn)
self._gaussian = _matrix_and_gaussian_to_gaussian(self.matrix, y_gaussian)
return self._gaussian
def expand(self, batch_shape):
matrix = self.matrix.expand(batch_shape + self.matrix.shape[-2:])
loc = self.loc.expand(batch_shape + self.loc.shape[-1:])
scale = self.scale.expand(batch_shape + self.scale.shape[-1:])
return AffineNormal(matrix, loc, scale)
def reshape(self, batch_shape):
matrix = self.matrix.reshape(batch_shape + self.matrix.shape[-2:])
loc = self.loc.reshape(batch_shape + self.loc.shape[-1:])
scale = self.scale.reshape(batch_shape + self.scale.shape[-1:])
return AffineNormal(matrix, loc, scale)
def __getitem__(self, index):
assert isinstance(index, tuple)
matrix = self.matrix[index + (slice(None), slice(None))]
loc = self.loc[index + (slice(None),)]
scale = self.scale[index + (slice(None),)]
return AffineNormal(matrix, loc, scale)
def event_permute(self, perm):
return self.to_gaussian().event_permute(perm)
def __add__(self, other):
return self.to_gaussian() + other
def marginalize(self, left=0, right=0):
if left == 0 and right == self.loc.size(-1):
n = self.matrix.size(-2)
precision = self.scale.new_zeros(self.batch_shape + (n, n))
info_vec = self.scale.new_zeros(self.batch_shape + (n,))
log_normalizer = self.scale.new_zeros(self.batch_shape)
return Gaussian(log_normalizer, info_vec, precision)
else:
return self.to_gaussian().marginalize(left, right)
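# Illustrative sketch (editor's addition, not part of the original module): the
# identity stated in the AffineNormal docstring, checked numerically. The dimensions
# and tensors below are arbitrary test values.
def _example_affine_normal_condition():
    x_dim, y_dim = 2, 3
    matrix = torch.randn(x_dim, y_dim)
    loc = torch.randn(y_dim)
    scale = torch.rand(y_dim) + 0.5
    x, y = torch.randn(x_dim), torch.randn(y_dim)
    lhs = AffineNormal(matrix, loc, scale).condition(y).log_density(x)
    rhs = torch.distributions.Independent(
        torch.distributions.Normal(x @ matrix + loc, scale), 1
    ).log_prob(y)
    assert torch.allclose(lhs, rhs, atol=1e-5)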
def mvn_to_gaussian(mvn):
"""
Convert a MultivariateNormal distribution to a Gaussian.
:param ~torch.distributions.MultivariateNormal mvn: A multivariate normal distribution.
:return: An equivalent Gaussian object.
:rtype: ~pyro.ops.gaussian.Gaussian
"""
assert isinstance(mvn, torch.distributions.MultivariateNormal) or (
isinstance(mvn, torch.distributions.Independent)
and isinstance(mvn.base_dist, torch.distributions.Normal)
)
if isinstance(mvn, torch.distributions.Independent):
mvn = mvn.base_dist
precision_diag = mvn.scale.pow(-2)
precision = precision_diag.diag_embed()
info_vec = mvn.loc * precision_diag
scale_diag = mvn.scale
else:
precision = mvn.precision_matrix
info_vec = matvecmul(precision, mvn.loc)
scale_diag = mvn.scale_tril.diagonal(dim1=-2, dim2=-1)
n = mvn.loc.size(-1)
log_normalizer = (
-0.5 * n * math.log(2 * math.pi)
+ -0.5 * (info_vec * mvn.loc).sum(-1)
- scale_diag.log().sum(-1)
)
return Gaussian(log_normalizer, info_vec, precision)
def _matrix_and_gaussian_to_gaussian(matrix, y_gaussian):
P_yy = y_gaussian.precision
neg_P_xy = matmul(matrix, P_yy)
P_xy = -neg_P_xy
P_yx = P_xy.transpose(-1, -2)
P_xx = matmul(neg_P_xy, matrix.transpose(-1, -2))
precision = torch.cat(
[torch.cat([P_xx, P_xy], -1), torch.cat([P_yx, P_yy], -1)], -2
)
info_y = y_gaussian.info_vec
info_x = -matvecmul(matrix, info_y)
info_vec = torch.cat([info_x, info_y], -1)
log_normalizer = y_gaussian.log_normalizer
result = Gaussian(log_normalizer, info_vec, precision)
return result
def matrix_and_mvn_to_gaussian(matrix, mvn):
"""
Convert a noisy affine function to a Gaussian. The noisy affine function is defined as::
y = x @ matrix + mvn.sample()
:param ~torch.Tensor matrix: A matrix with rightmost shape ``(x_dim, y_dim)``.
:param ~torch.distributions.MultivariateNormal mvn: A multivariate normal distribution.
:return: A Gaussian with broadcasted batch shape and ``.dim() == x_dim + y_dim``.
:rtype: ~pyro.ops.gaussian.Gaussian
"""
assert isinstance(mvn, torch.distributions.MultivariateNormal) or (
isinstance(mvn, torch.distributions.Independent)
and isinstance(mvn.base_dist, torch.distributions.Normal)
)
assert isinstance(matrix, torch.Tensor)
x_dim, y_dim = matrix.shape[-2:]
assert mvn.event_shape == (y_dim,)
batch_shape = broadcast_shape(matrix.shape[:-2], mvn.batch_shape)
matrix = matrix.expand(batch_shape + (x_dim, y_dim))
mvn = mvn.expand(batch_shape)
# Handle diagonal normal distributions as an efficient special case.
if isinstance(mvn, torch.distributions.Independent):
return AffineNormal(matrix, mvn.base_dist.loc, mvn.base_dist.scale)
y_gaussian = mvn_to_gaussian(mvn)
result = _matrix_and_gaussian_to_gaussian(matrix, y_gaussian)
assert result.batch_shape == batch_shape
assert result.dim() == x_dim + y_dim
return result
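# Illustrative sketch (editor's addition, not part of the original module): the joint
# Gaussian over (x, y) returned above scores exactly the noise term of
# y = x @ matrix + mvn.sample(), i.e. log p(x, y) == mvn.log_prob(y - x @ matrix).
def _example_matrix_and_mvn_to_gaussian():
    x_dim, y_dim = 2, 3
    matrix = torch.randn(x_dim, y_dim)
    mvn = torch.distributions.MultivariateNormal(
        torch.randn(y_dim), covariance_matrix=torch.eye(y_dim) * 0.3
    )
    g = matrix_and_mvn_to_gaussian(matrix, mvn)
    x, y = torch.randn(x_dim), torch.randn(y_dim)
    expected = mvn.log_prob(y - x @ matrix)
    assert torch.allclose(g.log_density(torch.cat([x, y], -1)), expected, atol=1e-5)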
def gaussian_tensordot(x, y, dims=0):
"""
Computes the integral over two gaussians:
`(x @ y)(a,c) = log(integral(exp(x(a,b) + y(b,c)), b))`,
where `x` is a gaussian over variables (a,b), `y` is a gaussian over variables
(b,c), (a,b,c) can each be sets of zero or more variables, and `dims` is the size of b.
:param x: a Gaussian instance
:param y: a Gaussian instance
:param dims: number of variables to contract
"""
assert isinstance(x, Gaussian)
assert isinstance(y, Gaussian)
na = x.dim() - dims
nb = dims
nc = y.dim() - dims
assert na >= 0
assert nb >= 0
assert nc >= 0
Paa, Pba, Pbb = (
x.precision[..., :na, :na],
x.precision[..., na:, :na],
x.precision[..., na:, na:],
)
Qbb, Qbc, Qcc = (
y.precision[..., :nb, :nb],
y.precision[..., :nb, nb:],
y.precision[..., nb:, nb:],
)
xa, xb = x.info_vec[..., :na], x.info_vec[..., na:] # x.precision @ x.mean
yb, yc = y.info_vec[..., :nb], y.info_vec[..., nb:] # y.precision @ y.mean
precision = pad(Paa, (0, nc, 0, nc)) + pad(Qcc, (na, 0, na, 0))
info_vec = pad(xa, (0, nc)) + pad(yc, (na, 0))
log_normalizer = x.log_normalizer + y.log_normalizer
if nb > 0:
B = pad(Pba, (0, nc)) + pad(Qbc, (na, 0))
b = xb + yb
        # Pbb + Qbb needs to be positive definite, so that we can marginalize out `b` (to have a finite integral)
L = cholesky(Pbb + Qbb)
LinvB = triangular_solve(B, L, upper=False)
LinvBt = LinvB.transpose(-2, -1)
Linvb = triangular_solve(b.unsqueeze(-1), L, upper=False)
precision = precision - matmul(LinvBt, LinvB)
# NB: precision might not be invertible for getting mean = precision^-1 @ info_vec
if na + nc > 0:
info_vec = info_vec - matmul(LinvBt, Linvb).squeeze(-1)
logdet = torch.diagonal(L, dim1=-2, dim2=-1).log().sum(-1)
diff = (
0.5 * nb * math.log(2 * math.pi)
+ 0.5 * Linvb.squeeze(-1).pow(2).sum(-1)
- logdet
)
log_normalizer = log_normalizer + diff
return Gaussian(log_normalizer, info_vec, precision)
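# Illustrative sketch (editor's addition, not part of the original module): contracting
# all `dims` of two Gaussians over the same variable block integrates that block out,
# so the resulting dim-0 Gaussian carries the log integral:
# gaussian_tensordot(x, y, dims=n).log_normalizer == (x + y).event_logsumexp().
def _example_gaussian_tensordot():
    n = 3
    def random_gaussian(dim):
        a = torch.eye(dim) + 0.1 * torch.randn(dim, dim)
        cov = a @ a.transpose(-1, -2)
        mvn = torch.distributions.MultivariateNormal(torch.randn(dim), covariance_matrix=cov)
        return mvn_to_gaussian(mvn)
    gx, gy = random_gaussian(n), random_gaussian(n)
    z = gaussian_tensordot(gx, gy, dims=n)
    assert z.dim() == 0
    assert torch.allclose(z.log_normalizer, (gx + gy).event_logsumexp(), atol=1e-4)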
|
|
#####################################################################################
#
# Copyright (c) Microsoft Corporation. All rights reserved.
#
# This source code is subject to terms and conditions of the Apache License, Version 2.0. A
# copy of the license can be found in the License.html file at the root of this distribution. If
# you cannot locate the Apache License, Version 2.0, please send an email to
# [email protected]. By using this source code in any fashion, you are agreeing to be bound
# by the terms of the Apache License, Version 2.0.
#
# You must not remove this notice, or any other, from this software.
#
#
#####################################################################################
##
## Testing IronPython Engine
##
from iptest.assert_util import *
skiptest("win32")
import sys
if not is_silverlight:
remove_ironpython_dlls(testpath.public_testdir)
load_iron_python_dll()
# setup Scenario tests in module from EngineTest.cs
# this enables us to see the individual tests that pass / fail
load_iron_python_test()
import IronPython
import IronPythonTest
et = IronPythonTest.EngineTest()
multipleexecskips = [ ]
for s in dir(et):
if s.startswith("Scenario"):
if s in multipleexecskips:
exec '@skip("multiple_execute") \ndef test_Engine_%s(): getattr(et, "%s")()' % (s, s)
else :
exec 'def test_Engine_%s(): getattr(et, "%s")()' % (s, s)
#Rowan Work Item 312902
@disabled("The ProfileDrivenCompilation feature is removed from DLR")
def test_deferred_compilation():
save1 = IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode
save2 = IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation
modules = sys.modules.copy()
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = True # this will enable interpreted mode
Assert(IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode)
try:
# Just import some modules to make sure we can switch to compilation without blowing up
import test_namebinding
import test_function
import test_tcf
finally:
IronPythonTest.TestHelpers.GetContext().Options.InterpretedMode = save1
IronPythonTest.TestHelpers.GetContext().Options.ProfileDrivenCompilation = save2
sys.modules = modules
def CreateOptions():
import sys
import clr
o = IronPython.PythonEngineOptions()
if sys.argv.count('-X:ExceptionDetail') > 0: o.ExceptionDetail = True
return o
def a():
raise System.Exception()
def b():
try:
a()
except System.Exception, e:
raise System.Exception("second", e)
def c():
try:
b()
except System.Exception, e:
x = System.Exception("first", e)
return x
#Rowan Work Item 312902
@skip("silverlight", "multiple_execute")
def test_formatexception():
try:
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine()
service = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]()
AssertError(TypeError, service.FormatException, None)
exc_string = service.FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, 'Traceback (most recent call last):\r\nException: first')
exc_string = service.FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
finally:
pass
#Rowan Work Item 31290
@skip("silverlight")
def test_formatexception_showclrexceptions():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ShowClrExceptions': True})
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first",
System.Exception("second",
System.Exception())))
AreEqual(exc_string, "Traceback (most recent call last):\r\nException: first\r\nCLR Exception: \r\n Exception\r\n: \r\nfirst\r\n Exception\r\n: \r\nsecond\r\n Exception\r\n: \r\nException of type 'System.Exception' was thrown.\r\n")
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
AreEqual(exc_string.count(" File "), 4)
AreEqual(exc_string.count(" line "), 4)
Assert(exc_string.endswith("CLR Exception: \r\n Exception\r\n: \r\nfirst\r\n Exception\r\n: \r\nsecond\r\n Exception\r\n: \r\nException of type 'System.Exception' was thrown.\r\n"))
@skip("silverlight", "multiple_execute") #CodePlex 20636 - multi-execute
def test_formatexception_exceptiondetail():
import Microsoft.Scripting
from IronPython.Hosting import Python
pe = Python.CreateEngine({'ExceptionDetail': True})
try:
x = System.Collections.Generic.Dictionary[object, object]()
x[None] = 42
except System.Exception, e:
pass
import re
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(System.Exception("first", e))
Assert(exc_string.startswith("first"))
Assert(re.match("first\r\n( at .*ThrowArgumentNullException.*\n)? at .*Insert.*\n( at .*\n)*",exc_string) is not None)
exc_string = pe.GetService[Microsoft.Scripting.Hosting.ExceptionOperations]().FormatException(c())
Assert(exc_string.endswith("Exception: first"))
@skip("silverlight")
def test_engine_access_from_within():
import clr
from Microsoft.Scripting.Hosting import ScriptEngine
pc = clr.GetCurrentRuntime().GetLanguageByName('python')
engine = pc.GetModuleState(clr.GetClrType(ScriptEngine))
Assert(engine is not None)
def test_import_clr():
from IronPython.Hosting import Python
eng = Python.CreateEngine()
mod = Python.ImportModule(eng, 'clr')
Assert('ToString' not in eng.Operations.GetMemberNames(42))
@skip("silverlight")
def test_cp6703():
import clr
clr.AddReference("IronPython")
import IronPython
pe = IronPython.Hosting.Python.CreateEngine()
stuff = '''
import System
a = 2
globals()["b"] = None
globals().Add("c", "blah")
joe = System.Collections.Generic.KeyValuePair[object,object]("d", int(3))
globals().Add(joe)
count = 0
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("b", None)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("c", "blah")): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", int(3))): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", 3)): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("d", "3")): count += 1
if globals().Contains(System.Collections.Generic.KeyValuePair[object,object]("a", 2)): count += 1
'''
s = pe.CreateScope()
pe.Execute(stuff, s)
AreEqual(s.count, 6)
def test_cp20594():
import IronPython
AreEqual(IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython").split(",", 1)[1],
IronPython.Runtime.PythonContext.GetIronPythonAssembly("IronPython.Modules").split(",", 1)[1])
def test_cp27547():
import clr
clr.AddReference('IronPython')
clr.AddReference('Microsoft.Scripting')
from IronPython.Hosting import Python
from Microsoft.Scripting import SourceCodeKind, ScriptCodeParseResult
engine = Python.CreateEngine()
scope = engine.CreateScope()
text = 'lambda'
source = engine.CreateScriptSourceFromString(text, 'stdin',
SourceCodeKind.InteractiveCode)
result = source.GetCodeProperties()
AreEqual(result, ScriptCodeParseResult.IncompleteToken)
def test_hidden_base():
from IronPythonTest import DerivedFromHiddenBase
a = DerivedFromHiddenBase()
AreEqual(a.Accessible(), 42)
AssertError(AttributeError, lambda: a.Inaccessible)
def test_cp27150():
from IronPythonTest import GenericProperty
from System import DateTime
wrapper = GenericProperty[DateTime]()
def f():
wrapper.Value = None
AssertError(TypeError, f)
#--MAIN------------------------------------------------------------------------
run_test(__name__)
#Make sure this runs last
#test_dispose()
|
|
# Class definition:
# Experiment
# This class is the main experiment class; ATLAS etc will inherit from this class
# Instances are generated with ExperimentFactory
# Subclasses should implement all needed methods prototyped in this class
# Note: not compatible with Singleton Design Pattern due to the subclassing
import os
import re
import time
import commands
from glob import glob                   # Used by removeRedundantFiles()
from subprocess import Popen, PIPE
from PilotErrors import PilotErrors
from pUtil import tolog # Dump to pilot log
from pUtil import readpar # Used to read values from the schedconfig DB (queuedata)
from pUtil import getCmtconfig # cmtconfig (move to subclass)
from pUtil import getDirectAccessDic # Get the direct access dictionary
from pUtil import isBuildJob # Is the current job a build job?
from pUtil import remove # Used to remove redundant file before log file creation
from pUtil import getPilotlogFilename # Used in the subprocess arguments method
from pUtil import extractHPCInfo # Used by getSubprocessName() to determine HPC plug-in if necessary
class Experiment(object):
# experiment = "generic" # String defining the experiment
# private data members
__experiment = "generic" # String defining the experiment
__instance = None # Boolean used by subclasses to become a Singleton
__error = PilotErrors() # PilotErrors object
__doFileLookups = False # True for LFC based file lookups (basically a dummy data member here since singleton object is static)
__cache = "" # Cache URL used e.g. by LSST
# Required methods
def __init__(self, *args, **kwargs):
""" Default initialization """
# e.g. self.__errorLabel = errorLabel
# self.experiment = kwargs.get('experiment')
pass
def getExperiment(self):
""" Return a string with the experiment name """
# return self.experiment
return self.__experiment
def getJobExecutionCommand(self):
""" Define and test the command(s) that will be used to execute the payload """
# E.g. cmd = "source <path>/setup.sh; <path>/python <script>"
cmd = ""
return cmd
def getFileLookups(self):
""" Return the file lookup boolean """
return self.__doFileLookups
def doFileLookups(self, doFileLookups):
""" Update the file lookups boolean """
# Only implement this method if class really wants to update the __doFileLookups boolean
# ATLAS wants to implement this, but not CMS
# Method is used by Mover
# self.__doFileLookups = doFileLookups
pass
def willDoAlternativeFileLookups(self):
""" Should file lookups be done using alternative methods? """
# E.g. in the migration period where LFC lookups are halted in favour of other methods in the Rucio API
# (for ATLAS), this method could be useful. See the usage in Mover::getReplicaDictionary() which is called
# after Experiment::willDoFileLookups() defined above. The motivation is that direct LFC calls are not to be
# used any longer by the pilot, and in the migration period the actual LFC calls will be done in the Rucio
# API. Eventually this API will switch to alternative file lookups.
return False
def willDoFileLookups(self):
""" Should (LFC) file lookups be done by the pilot or not? """
return self.__doFileLookups
def willDoFileRegistration(self):
""" Should (LFC) file registration be done by the pilot or not? """
return False
def getFileCatalog(self):
""" Return the default file catalog to use (e.g. for replica lookups) """
# See usage in Mover.py
# e.g. 'lfc://prod-lfc-atlas.cern.ch:/grid/atlas'
return ""
# Additional optional methods
# These methods are optional and can be left as they are here, or modified according to special needs
def verifyProxy(self, envsetup="", limit=None):
""" Check for a valid voms/grid proxy longer than N hours """
# Use 'limit' to set required length
tolog("(verifyProxy() is not implemented)")
exitcode = 0
pilotErrorDiag = ""
return exitcode, pilotErrorDiag
def removeRedundantFiles(self, workdir):
""" Remove redundant files and directories """
# List of files and directories to be removed from work directory prior to log file creation
# Make sure that any large files or directories that are not wanted in the log file are included in this list
dir_list = [
"buildJob*",
"external",
"fort.*",
"home",
"python",
"share",
"workdir",
"*.py",
"*.pyc",
"*.root*",
"JEM",
"tmp*",
"*.tmp",
"*.TMP",
"scratch",
]
for _dir in dir_list:
files = glob(os.path.join(workdir, _dir))
rc = remove(files)
if not rc:
tolog("IGNORE: Failed to remove redundant file(s): %s" % (files))
def getPayloadName(self, job):
""" Set a suitable name for the payload stdout """
# The payload <name> gets translated into <name>_stdout.txt
# which is the name of the stdout file produced by the payload execution
# (essentially commands.getoutput("<setup>; <payload executable> [options] > <name>_stdout.txt"))
# The job object can be used to create more precise stdout names (see e.g. the ATLASExperiment implementation)
return "payload"
def isOutOfMemory(self, **kwargs):
""" Try to identify out of memory errors in the stderr/out """
return False
def getNumberOfEvents(self, **kwargs):
""" Return the number of events """
return 0
def specialChecks(self, **kwargs):
""" Implement special checks here """
# Return False if fatal failure, otherwise return True
# The pilot will abort if this method returns a False
# On an HPC system, it might be good to skip certain checks (e.g. CVMFS, LFC, etc). Refer to schedconfig.resourcetype, set to 'hpc' on an HPC queue
status = False
tolog("No special checks for \'%s\'" % (self.experiment))
return True # obviously change this to 'status' once implemented
def checkSpecialEnvVars(self, sitename):
""" Check special environment variables """
ec = 0
tolog("No special env var checks for site %s" % (sitename))
return ec
def setINDS(self, realDatasetsIn):
""" Extract the dataset as set by pathena option --inDS and set the INDS environmental variable """
# Needed by pathena (move to ATLASExperiment later)
inDS = ""
for ds in realDatasetsIn:
if "DBRelease" not in ds and ".lib." not in ds:
inDS = ds
break
if inDS != "":
tolog("Setting INDS env variable to: %s" % (inDS))
os.environ['INDS'] = inDS
else:
tolog("INDS unknown")
def getValidBaseURLs(self, order=None):
""" Return list of valid base URLs """
# if order is defined, return given item first
# e.g. order=http://atlpan.web.cern.ch/atlpan -> ['http://atlpan.web.cern.ch/atlpan', ...]
validBaseURLs = []
_validBaseURLs = ["http://www.usatlas.bnl.gov",\
"https://www.usatlas.bnl.gov",\
"http://pandaserver.cern.ch",\
"http://atlpan.web.cern.ch/atlpan",\
"https://atlpan.web.cern.ch/atlpan",\
"http://classis01.roma1.infn.it",\
"http://atlas-install.roma1.infn.it"]
if order:
validBaseURLs.append(order)
for url in _validBaseURLs:
if url != order:
validBaseURLs.append(url)
else:
validBaseURLs = _validBaseURLs
tolog("getValidBaseURLs will return: %s" % str(validBaseURLs))
return validBaseURLs
def downloadTrf(self, wgetCommand, jobTrf):
""" Download the trf """
status = False
pilotErrorDiag = ""
cmd = "%s %s" % (wgetCommand, jobTrf)
trial = 1
max_trials = 3
# try to download the trf a maximum of 3 times
while trial <= max_trials:
tolog("Executing command [Trial %d/%d]: %s" % (trial, max_trials, cmd))
ec, rets = commands.getstatusoutput(cmd)
if not rets:
rets = "(None)"
if ec != 0:
# Analyze exit code / output
from futil import check_syserr
check_syserr(ec, rets)
pilotErrorDiag = "wget command failed: %d, %s" % (ec, rets)
tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
if trial == max_trials:
tolog("!!FAILED!!3000!! Could not download trf: %s" % (rets))
status = False
break
else:
tolog("Will try again after 60s..")
from time import sleep
sleep(60)
else:
pilotErrorDiag = ""
tolog("wget command returned: %s" % (rets))
status = True
break
trial += 1
return status, pilotErrorDiag
def getAnalysisTrf(self, wgetCommand, origTRF, pilot_initdir):
""" Get the trf to be used for analysis jobs """
pilotErrorDiag = ""
trfName = origTRF.split('/')[-1]
tolog("trfName = %s" % (trfName))
origBaseURL = ""
# Copy trf from pilot init dir if distributed with pilot code
fname = os.path.join(pilot_initdir, trfName)
status = False
if os.path.exists(fname):
from shutil import copy2
try:
copy2(fname, os.getcwd())
except Exception, e:
tolog("!!WARNING!!2999!! Could not copy trf from pilot init dir: %s" % str(e))
else:
tolog("Copied trf (%s) from pilot init dir" % (fname))
status = True
# Download trf
if not status:
# verify the base URL
for baseURL in self.getValidBaseURLs():
if origTRF.startswith(baseURL):
origBaseURL = baseURL
break
if origBaseURL == "":
pilotErrorDiag = "Invalid base URL: %s" % (origTRF)
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
else:
tolog("Verified the trf base URL: %s" % (origBaseURL))
# try to download from the required location, if not - switch to backup
for baseURL in self.getValidBaseURLs(order=origBaseURL):
trf = re.sub(origBaseURL, baseURL, origTRF)
tolog("Attempting to download trf: %s" % (trf))
status, pilotErrorDiag = self.downloadTrf(wgetCommand, trf)
if status:
break
if not status:
return self.__error.ERR_TRFDOWNLOAD, pilotErrorDiag, ""
tolog("Successfully downloaded trf")
tolog("Changing permission of %s to 0755" % (trfName))
try:
os.chmod(trfName, 0755)
except Exception, e:
pilotErrorDiag = "Failed to chmod %s: %s" % (trfName, str(e))
return self.__error.ERR_CHMODTRF, pilotErrorDiag, ""
return 0, pilotErrorDiag, trfName
def getAnalysisRunCommand(self, job, jobSite, trfName):
""" Get the run command for analysis jobs """
# The run command is used to setup up the user job transform
ec = 0
pilotErrorDiag = ""
run_command = ""
return ec, pilotErrorDiag, run_command
def getFileTransferInfo(self, transferType, buildJob):
""" Get all relevant fields related to file transfer """
copysetup = readpar('copysetupin')
# create the direct access dictionary
fileTransferInfo = getDirectAccessDic(copysetup)
# if copysetupin did not contain direct access info, try the copysetup instead
if not fileTransferInfo:
copysetup = readpar('copysetup')
fileTransferInfo = getDirectAccessDic(copysetup)
# should the copytool be used?
useCopyTool = False
useFileStager = False
useDirectAccess = False
oldPrefix = ""
newPrefix = ""
dInfo = None
if fileTransferInfo:
dInfo = True
# no direct access / remote I/O, use standard copytool (copy-to-scratch)
if fileTransferInfo['useCopyTool']:
useCopyTool = True
# do not set the LFC host for file stager
if fileTransferInfo['useFileStager']:
useFileStager = True
if fileTransferInfo['directIn']:
useDirectAccess = True
oldPrefix = fileTransferInfo['oldPrefix']
newPrefix = fileTransferInfo['newPrefix']
# override settings for transferType direct
if transferType == 'direct':
useCopyTool = False
useFileStager = False
useDirectAccess = True
# should pilot create TURL based PFC? (not done here, but setup needs to be aware of it)
# if dInfo and useDirectAccess and oldPrefix == "" and newPrefix == "":
if (transferType == 'direct' or (useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == "") and not buildJob:
# if (transferType == 'direct' or (not useFileStager and useDirectAccess)) and (oldPrefix == "" and newPrefix == ""):
usePFCTurl = True
else:
usePFCTurl = False
# force usePFCTurl for all jobs
if not buildJob and useDirectAccess:
tolog("Forced usePFCTurl (reset old/newPrefix)")
usePFCTurl = True
oldPrefix = ""
newPrefix = ""
if os.environ.get("TestXRootD", 'False') == 'True':
import re
            copysetup = re.sub(r'\/xrootdsetup\.sh', '/xrootdsetup-dev.sh', copysetup)
return dInfo, useCopyTool, useDirectAccess, useFileStager, oldPrefix, newPrefix, copysetup, usePFCTurl
def getGuidsFromJobPars(self, jobPars, inputFiles, inFilesGuids):
""" Extract the correct guid from the input file list """
# the guids list is used for direct reading in an LFC environment
# 1. extract input file list for direct reading from jobPars
# 2. for each input file in this list, find the corresponding guid from the input file guid list
# since jobPars is entered by a human, the order of the input files might not be the same
guidList = []
jobPars = jobPars.replace("'","")
jobPars = jobPars.replace(", ",",")
pattern = re.compile(r'\-i \"\[([A-Za-z0-9.,_-]+)\]\"')
directReadingInputFiles = re.findall(pattern, jobPars)
inFiles = []
if directReadingInputFiles != []:
inFiles = directReadingInputFiles[0].split(",")
else:
match = re.search("-i ([A-Za-z0-9.\[\],_-]+) ", jobPars)
if match != None:
compactInFiles = match.group(1)
match = re.search('(.*)\[(.+)\](.*)\[(.+)\]', compactInFiles)
if match != None:
inputFiles = []
head = match.group(1)
tail = match.group(3)
body = match.group(2).split(',')
attr = match.group(4).split(',')
for idx in range(len(body)):
lfn = '%s%s%s%s' % (head, body[idx], tail, attr[idx])
inputFiles.append(lfn)
else:
inputFiles = [compactInFiles]
if inFiles != []:
for inFile in inFiles:
# get the corresponding index from the inputFiles list, which has the same order as inFilesGuids
try:
index = inputFiles.index(inFile)
except Exception, e:
tolog("!!WARNING!!2999!! Exception caught: %s (direct reading will fail)" % str(e))
else:
# add the corresponding guid to the list
guidList.append(inFilesGuids[index])
return guidList
def getMetadataForRegistration(self, guid):
""" Return metadata for [LFC] file registration """
# This method can insert special metadata into the metadata.xml file
# E.g. it can add preliminary XML tags for info that will only be known
# at a later time, such as "<metadata att_name="surl" att_value="%s-surltobeset"/>\n' % (guid)"
# The <guid>-surltobeset will be replaced by the pilot by the appropriate value once it is known
# Inputs:
# guid = file guid
# Returns:
# metadata string
# See e.g. the CMSExperiment implementation
# The method is called from pUtil::PFCxml() during metadata file creation
return ""
def getAttrForRegistration(self):
""" Return the attribute of the metadata XML to be updated with surl value """
# Used in combination with Experiment::getMetadataForRegistration()
# The attribute (default 'surl') will be copied into the metadata string used for pattern matching
# E.g. re.compile('\<metadata att\_name\=\"%s\" att\_value\=\"([a-zA-Z0-9-]+)\-surltobeset\"\/\>' % (attribute))
return 'surl'
def getExpSpecificMetadata(self, job, workdir):
""" Return experiment specific metadata """
# Inputs:
# job = PanDA pilot job object (see Job class)
# workdir = relevant work directory where the metadata is located
# Returns:
# metadata xml string
# See e.g. implementation in CMSExperiment
return ""
def getFileCatalogHosts(self):
""" Return a list of file catalog hosts """
# The method is used in combination with federated xrootd (FAX).
# In case FAX is allowed on a given site, the pilot might need to lookup
# replica information in more than one LFC catalog. Normally a site has only
# one LFC (as set in schedconfig.lfchost). Providing a list of hosts will increase
# the probability that FAX will succeed
# See e.g. ATLASExperiment implementation
return []
def verifySwbase(self, appdir):
""" Confirm existence of appdir/swbase """
# appdir/swbase is a queuedata parameter specifying the base location of physics analysis / release software
# This method will simply verify that the corresponding directory exists
#
# Input:
# appdir = application/software/release directory (e.g. /cvmfs/atlas.cern.ch/repo/sw)
# Return:
# error code (0 for success)
return 0
def interpretPayloadStdout(self, job, res, getstatusoutput_was_interrupted, current_job_number, runCommandList, failureCode):
""" Payload error interpretation and handling """
# NOTE: TODO, hide argument complexity with kwargs**
# This method can be used to interpret special errors that only occur in actual payload stdout, e.g. memory errors that have
# caused the payload to crash
#
# Inputs:
# job = PanDA pilot job object (see Job class)
# res =
# getstatusoutput_was_interrupted = True in case the payload execution command was aborted (e.g. keyboard CTRL-C)
# current_job_number = current job number, in case of multi-trf (ATLAS)
# runCommandList = list of payload execution commands (e.g. used by ATLAS to get to a setup file)
# failureCode = signal error code
# Returns:
        # Updated PanDA pilot job object with proper payload error information, if needed
#
# The following Job attributes can be updated here
# result = tuple of size 3 that contain the standard error info: result[0] = current job status (e.g. failed, finished, holding),
# result[1] = payload error code, result[2] = PanDA pilot error code
# pilotErrorDiag = error diagnostics (string of up to 256 characters that will appear on the PanDA monitor job web page for a failed job)
# exeError
return job
def getSubprocessName(self, eventService):
""" Select which subprocess is to be run by the Monitor """
# The default subprocess is RunJob (name='Normal', which performs payload setup, stage-in, payload execution and stage-out).
# An alternative subprocess is the runEvent module which downloads events from an Event Server, executes a payload
        # and stages out output files asynchronously as they are ready.
# Note: send the entire job object to this method since there might be other subprocesses created at a later time which
# will be identified by this method using some other job data member
# Default subprocess name
name = "RunJob"
# Select alternative subprocess names for HPCs
isHPC, _name = extractHPCInfo(readpar('catchall'))
if isHPC:
name = "RunJob" + _name # e.g. "RunJobTitan" is the proper subprocess name for the Titan plug-in
# for es merge jobs
if _name and _name.startswith("Hpc"):
name = "RunJob"
# Are we going to run an event service job?
if eventService:
tolog("Encountered an event service job")
if isHPC:
name = "RunJob%sEvent" % (_name)
else:
name = "RunJobEvent"
tolog("Selected subprocess: %s" % (name))
return name
def getSubprocessArguments(self, env, port, subprocessName="RunJob"):
""" Argument list needed to launch the subprocess by the pilot/Monitor """
# The pilot/Monitor is forking a subprocess which will be monitored for work dir size, hanging processes etc
# This method returns the arguments needed to execute the subprocess (python <subprocess name> <arguments>)
# By default the pilot has implementations for RunJob.py (standard job) and RunJobEvent.py (event server job)
        # If a new subprocess module is added, its startup arguments need to be specified here
jobargs = None
tolog("Will set up subprocess arguments for type: %s" % (subprocessName))
url = '%s:%s/server/panda' % (env['pshttpurl'], str(env['psport']))
if subprocessName == "RunJobEvent":
jobargs = [env['pyexe'], "RunJobEvent.py",
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
else:
jobargs = [env['pyexe'], "%s.py" % (subprocessName),
"-a", env['thisSite'].appdir,
"-b", env['queuename'],
"-d", env['jobDic']["prod"][1].workdir,
"-g", env['inputDir'],
"-i", env['jobDic']["prod"][1].tarFileGuid,
"-k", getPilotlogFilename(),
"-l", env['pilot_initdir'],
"-m", env['outputDir'],
"-o", env['thisSite'].workdir,
"-p", str(port),
"-s", env['thisSite'].sitename,
"-t", str(env['proxycheckFlag']),
"-x", str(env['stageinretry']),
"-E", str(env['stageoutretry']),
"-F", env['experiment'],
"-H", env['cache'],
"-W", url]
if 'yodaNodes' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-N")
jobargs.append(str(env['yodaNodes']))
if 'yodaQueue' in env and subprocessName == "RunJobHpcEvent":
jobargs.append("-Q")
jobargs.append(str(env['yodaQueue']))
tolog("Will use arguments: %s" % str(jobargs))
return jobargs
# Optional
def doSpecialLogFileTransfer(self, **argdict):
""" Should the log file be transfered to a special SE? """
# The log file can at the end of the job be stored in a special SE - in addition to the normal stage-out of the log file
# If this method returns True, the JobLog class will attempt to store the log file in a secondary SE after the transfer of
# the log to the primary/normal SE. Additional information about the secondary SE is required and can be specified in
# another optional method defined in the *Experiment classes
# eventService = argdict.get('eventService', False)
return False
# Optional
def getSchedconfigURL(self, protocol="http://"):
""" Define the URL for the schedconfig / PanDA server"""
# This method gets called from SiteInformation in case the URL is not set (by the wrapper)
return protocol + "pandaserver.cern.ch"
# Optional
def getSubprocess(self, cmd, stdout=None, stderr=None):
""" Execute and return a subprocess """
process = None
try:
tolog("Executing command: %s" % (cmd))
if stdout and stderr:
# use stdout/stdout file objects to redirect the stdout/stderr streams
process = Popen(cmd, shell=True, stdout=stdout, stderr=stderr, preexec_fn=os.setsid)
else:
process = Popen(cmd, shell=True)
except Exception, e:
tolog("!!WARNING!!2344!! Caught exception: %s" % (e))
else:
tolog("Subprocess is running")
return process
# Optional
def getJobExecutionCommand4EventService(self):
""" Define and test the command(s) that will be used to execute the payload for the event service """
# E.g. cmd = ["source <path>/setup.sh; <path>/python <script>"]
# The command returned from this method is executed using subprocess.Popen() from the runEvent module
        # Note: this optional method only needs to be defined in case the event service is to be used
# As of March 2014, this is not yet functional or documented.
# The actual command must be declared as a list since that is expected by Popen()
cmd = [""]
return cmd
# Optional
def postGetJobActions(self, job):
""" Perform any special post-job definition download actions here """
# This method is called after the getJob() method has successfully downloaded a new job (job definition) from
# the server. If the job definition e.g. contains information that contradicts WN specifics, this method can
# be used to fail the job
# Return any error code using ec, and any error message using pilotErrorDiag
ec = 0
pilotErrorDiag = ""
return ec, pilotErrorDiag
# Optional
def updateJobSetupScript(self, workdir, create=False, to_script=None):
""" Create or update the job setup script (used to recreate the job locally if needed) """
# If create=True, this step will only create the file with the script header (bash info)
if create:
filename = os.path.basename(self.getJobSetupScriptName(workdir))
tolog("Creating job setup script with stage-in and payload execution commands: %s" % (filename))
to_script = "#!/bin/bash\n# %s %s\n\n" % (filename, time.strftime("%d %b %Y %H:%M:%S", time.gmtime(time.time())))
# Add the string to the setup script
if to_script:
self.addToJobSetupScript(to_script, workdir)
# Optional
def getJobSetupScriptName(self, workdir):
""" return the name of the job setup file """
return os.path.join(workdir, "job_setup.sh")
# Optional
def addToJobSetupScript(self, cmd, workdir):
""" add/append command to job setup file """
        filename = self.getJobSetupScriptName(workdir)
        fp = None
        if not os.path.exists(filename):
            try:
                fp = open(filename, "w")
            except (IOError, OSError), e:
                tolog("!!WARNING!!1880!! Could not open job setup file for writing: %s" % str(e))
        else:
            try:
                fp = open(filename, "a")
            except (IOError, OSError), e:
                tolog("!!WARNING!!1880!! Could not open job setup file for appending: %s" % str(e))
        if fp:
fp.write(cmd)
fp.write("\n\n")
fp.close()
tolog("Updated %s: %s" % (filename, cmd))
# Optional
def getRelease(self, release):
""" Return a list of the software release id's """
# Assuming 'release' is a string that separates release id's with '\n'
# Used in the case of payload using multiple steps with different release versions
# E.g. release = "19.0.0\n19.1.0" -> ['19.0.0', '19.1.0']
return release.split("\n")
# Optional
    def formatReleaseString(self, release):
""" Return a special formatted release string """
# E.g. release = "Atlas-19.0.0" -> "19.0.0"
# This method is required for ATLAS but is probably of no interest for any other PanDA user
return release
# Optional
def setCache(self, cache):
""" Cache URL """
# Used e.g. by LSST
self.__cache = cache
# Optional
def getCache(self):
""" Return the cache URL """
# Used e.g. by LSST
return self.__cache
# Optional
def useTracingService(self):
""" Use the Rucio Tracing Service """
# A service provided by the Rucio system that allows for file transfer tracking; all file transfers
# are reported by the pilot to the Rucio Tracing Service if this method returns True
return False
# Optional
def updateJobDefinition(self, job, filename):
""" Update the job definition file and object before using it in RunJob """
        # This method is called from Monitor, before RunJob is launched, which allows changes to be made to the job object after it has been downloaded from the job dispatcher
# (used within Monitor) and the job definition file (which is used from RunJob to recreate the same job object as is used in Monitor).
# 'job' is the job object, defined in Job.py, while 'filename' is the name of the file containing the job definition information.
return job
# Optional
def shouldExecuteUtility(self):
""" Determine whether a special utility should be executed """
# The RunJob class has the possibility to execute a special utility, e.g. a memory monitor, that runs in parallel
# to the payload (launched after the main payload process).
# The utility is executed if this method returns True. The utility is currently expected to produce
# a summary JSON file whose name is defined by the getUtilityJSONFilename() method. The contents of
# this file (ie. the full JSON dictionary) will be added to the job update.
#
# Example of summary JSON file (ATLAS case):
# {"Max":{"maxVMEM":40058624,"maxPSS":10340177,"maxRSS":16342012,"maxSwap":16235568},
# "Avg":{"avgVMEM":19384236,"avgPSS":5023500,"avgRSS":6501489,"avgSwap":5964997}}
#
# While running, the MemoryMonitor also produces a regularly updated text file with the following format: (tab separated)
# Time VMEM PSS RSS Swap (first line in file)
# 1447960494 16099644 3971809 6578312 1978060
return False
# Optional
def getUtilityOutputFilename(self):
""" Return the filename of a utility output file """
# For explanation, see shouldExecuteUtility()
return "memory_monitor_output.txt"
# Optional
def getUtilityJSONFilename(self):
""" Return the filename of a utility JSON file """
# For explanation, see shouldExecuteUtility()
return "utility_summary.json"
# Optional
def getUtilityInfo(self, workdir, pilot_initdir, allowTxtFile=False):
""" Add the utility info to the node structure if available """
# Extract the relevant information from the utility tool output and add it to the dictionary
# returned by this method. The dictionary will be merged with the node dictionary in
# PandaServerClient::getNodeStructure() and sent to the PanDA server
return {}
# Optional
def getUtilityCommand(self, **argdict):
""" Prepare a utility command string """
# This method can be used to prepare a setup string for an optional utility tool, e.g. a memory monitor,
# that will be executed by the pilot in parallel with the payload.
# The pilot will look for an output JSON file (summary.json) and will extract pre-determined fields
# from it and report them with the job updates. Currently the pilot expects to find fields related
# to memory information.
# pid = argdict.get('pid', 0)
return ""
# Optional
def getGUIDSourceFilename(self):
""" Return the filename of the file containing the GUIDs for the output files """
# In the case of ATLAS, Athena produces an XML file containing the GUIDs of the output files. The name of this
# file is PoolFileCatalog.xml. If this method returns an empty string (ie the default), the GUID generation will
# be done by the pilot in RunJobUtilities::getOutFilesGuids()
return ""
# Optional
def buildFAXPath(self, **argdict):
""" Build a proper FAX path """
# This method builds proper FAX paths and is used in pure FAX mode (i.e. when FAX is used in forced mode),
# particularly when the PoolFileCatalog.xml is built prior to stage-in
# Only needed if FAX mechanism is used in forced mode (i.e. when copytoolin='fax')
lfn = argdict.get('lfn', 'default_lfn')
scope = argdict.get('scope', 'default_scope')
subpath = argdict.get('subpath', 'atlas/rucio/')
pandaID = argdict.get('pandaID', '')
sourceSite = argdict.get('sourceSite', 'default_sourcesite')
computingSite = argdict.get('computingSite', 'default_computingsite')
# Get the proper FAX redirector (default ATLAS implementation)
from FAXTools import getFAXRedirectors
# First get the global redirectors (several, since the lib file might not be at the same place for overflow jobs)
fax_redirectors_dictionary = getFAXRedirectors(computingSite, sourceSite, pandaID)
tolog("fax_redirectors_dictionary=%s"%str(fax_redirectors_dictionary))
# select the proper fax redirector
if ".lib." in lfn:
redirector = fax_redirectors_dictionary['computingsite']
else:
redirector = fax_redirectors_dictionary['sourcesite']
# Make sure the redirector ends with a double slash
if not redirector.endswith('//'):
if redirector.endswith('/'):
redirector += "/"
else:
redirector += "//"
# Make sure that the subpath does not begin with a slash
if subpath.startswith('/') and len(subpath) > 1:
subpath = subpath[1:]
tolog("redirector=%s"%(redirector))
tolog("subpath=%s"%(subpath))
tolog("scope=%s"%(scope))
tolog("lfn=%s"%(lfn))
return redirector + subpath + scope + ":" + lfn
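# Illustrative sketch (editor's addition, not part of the pilot code base): a minimal
# hypothetical subclass showing the intended pattern, i.e. override the experiment name
# and only those prototyped methods the experiment actually needs. The class name and
# the command string below are made up for this sketch.
class ExampleExperiment(Experiment):
    __experiment = "Example"                 # String defining the experiment
    def getExperiment(self):
        """ Return a string with the experiment name """
        return self.__experiment
    def getJobExecutionCommand(self):
        """ Define the command used to execute the payload """
        # A real implementation would build this from queuedata, cmtconfig, etc.
        return "source ./setup.sh; python trf.py"
    def specialChecks(self, **kwargs):
        """ No special checks for this example experiment """
        tolog("No special checks for '%s'" % (self.getExperiment()))
        return True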
if __name__ == "__main__":
a=Experiment()
print a.getSubprocessName(False)
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import brewery.utils as utils
import heapq
__all__ = (
"create_node",
"node_dictionary",
"node_catalogue",
"get_node_info",
"NodeFinished",
"Node",
"SourceNode",
"TargetNode",
"Stack"
)
# FIXME: temporary dictionary to record displayed warnings about __node_info__
_node_info_warnings = set()
def create_node(identifier, *args, **kwargs):
"""Creates a node of type specified by `identifier`. Options are passed to
the node initializer"""
d = node_dictionary()
node_class = d[identifier]
node = node_class(*args, **kwargs)
return node
def node_dictionary():
"""Return a dictionary containing node name as key and node class as
    value. This will be deprecated soon in favour of
:func:`node_catalogue()`"""
classes = node_subclasses(Node)
dictionary = {}
for c in classes:
try:
name = c.identifier()
dictionary[name] = c
except AttributeError:
# If node does not provide identifier, we consider it to be
# private or abstract class
pass
return dictionary
def node_catalogue():
"""Returns a dictionary of information about all available nodes. Keys are
node identifiers, values are dictionaries. The information dictionary contains
all the keys from the node's `node_info` dictionary plus keys: `factory`
with node class, `type` (if not provided) is set to one of ``source``,
``processing`` or ``target``.
"""
classes = node_subclasses(Node)
catalogue = {}
for node_class in classes:
try:
name = node_class.identifier()
except AttributeError:
# If node does not provide identifier, we consider it to be
# private or abstract class
continue
# Get copy of node info
info = dict(get_node_info(node_class))
info["name"] = name
info["factory"] = node_class
# Get node type based on superclass, if not provided
if "type" not in info:
if issubclass(node_class, SourceNode):
info["type"] = "source"
elif not issubclass(node_class, SourceNode) \
and not issubclass(node_class, TargetNode):
info["type"] = "processing"
elif issubclass(node_class, TargetNode):
info["type"] = "target"
else:
info["type"] = "unknown"
catalogue[name] = info
return catalogue
def node_subclasses(root, abstract = False):
"""Get all subclasses of node.
:Parameters:
* `abstract`: If set to ``True`` all abstract classes are included as well. Default is
``False``
"""
classes = []
for c in utils.subclass_iterator(root):
try:
info = get_node_info(c)
node_type = info.get("type")
if node_type != "abstract":
classes.append(c)
except AttributeError:
pass
return classes
def get_node_info(cls):
"""Get node info attribute of a node - transient function during
    deprecation"""
if hasattr(cls, "__node_info__") and cls not in _node_info_warnings:
        utils.get_logger().warn("deprecated __node_info__ present in %s, rename to node_info" \
" (this warning will be shown only once)" % str(cls))
_node_info_warnings.add(cls)
return cls.__node_info__
else:
return cls.node_info
class Stack(object):
"""A stack holding records from a pipe. Each record has a key.
At most `depth` records are stored based on their key order.
"""
def __init__(self, depth):
self.depth = depth
self.heap = []
self.elements = {}
def push(self, key, value):
"""Push a `value` into rank `key` in the stack.
        If the stack is full, the element with the smallest key is removed, so the `depth` largest keys are kept. """
if len(self.heap)<self.depth:
heapq.heappush(self.heap, key)
self.elements[key] = value
else:
oldkey = heapq.heappushpop(self.heap, key)
self.elements[key] = value
del self.elements[oldkey]
def pop(self):
"""Pop an arbitrary element from the stack."""
try:
key = heapq.heappop(self.heap)
return self.elements[key]
        except IndexError:
raise StopIteration
def items(self):
"""An iterator of all elements."""
return self.elements.values()
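# Illustrative sketch (editor's addition, not part of the original module): with the
# min-heap implementation above, a full Stack keeps the `depth` records with the
# largest keys, and pop() returns records starting from the smallest kept key.
def _example_stack_usage():
    stack = Stack(depth=3)
    for key in (1, 2, 3, 4, 5):
        stack.push(key, "record-%d" % key)
    assert sorted(stack.items()) == ["record-3", "record-4", "record-5"]
    assert stack.pop() == "record-3"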
class NodeFinished(Exception):
"""Exception raised when node has no active outputs - each output node signalised that it
requires no more data."""
pass
class Node(object):
"""Base class for procesing node
.. abstract_node
"""
def __init__(self):
"""Creates a new data processing node.
:Attributes:
* `inputs`: input pipes
* `outputs`: output pipes
* `description`: custom node annotation
"""
super(Node, self).__init__()
self.inputs = []
self.outputs = []
self._active_outputs = []
self.description = None
# Experimental: dictionary to be used to retype output fields
# Currently used only in CSV source node.
self._retype_dictionary = {}
def initialize(self):
"""Initializes the node. Initialization is separated from creation. Put any Node subclass
initialization in this method. Default implementation does nothing.
.. note:
        Why the ``initialize()`` method? Node initialization is a different action from node object
instance initialization in the ``__init__()`` method. Before executing node contents, the
node has to be initialized - files or network connections opened, temporary tables created,
data that are going to be used for configuration fetched, ... Initialization might require
node to be fully configured first: all node attributes set to desired values.
"""
pass
def finalize(self):
"""Finalizes the node. Default implementation does nothing."""
pass
def run(self):
"""Main method for running the node code. Subclasses should implement this method.
"""
raise NotImplementedError("Subclasses of Node should implement the run() method")
@property
def input(self):
"""Return single node imput if exists. Convenience property for nodes which process only one
input. Raises exception if there are no inputs or are more than one imput."""
if len(self.inputs) == 1:
return self.inputs[0]
else:
raise Exception("Single input requested. Node has none or more than one input (%d)."
% len(self.inputs))
def add_input(self, pipe):
if pipe not in self.inputs:
self.inputs.append(pipe)
else:
raise Exception("Input %s already connected" % pipe)
def add_output(self, pipe):
if pipe not in self.outputs:
self.outputs.append(pipe)
else:
raise Exception("Output %s already connected" % pipe)
def retype(self, name, **attributes):
"""Retype an output field `name` to field `field`.
.. note:
This function is not set in stone and might change. Consider it to
be experimental feature.
"""
self._retype_dictionary[name] = attributes
def reset_type(self, name):
"""Remove all retype information for field `name`"""
del self._retype_dictionary[name]
def put(self, obj):
"""Put row into all output pipes.
Raises `NodeFinished` exception when node's target nodes are not receiving data anymore.
In most cases this exception might be ignored, as it is handled in the node thread
wrapper. If you want to perform necessary clean-up in the `run()` method before exiting,
        you should handle this exception and then re-raise it or just simply return from `run()`.
This method can be called only from node's `run()` method. Do not call it from
`initialize()` or `finalize()`.
"""
active_outputs = 0
for output in self.outputs:
if not output.closed():
output.put(obj)
active_outputs += 1
# This is not very safe, as run() might not expect it
if not active_outputs:
raise NodeFinished
def put_record(self, obj):
"""Put record into all output pipes. Convenience method. Not recommended to be used.
.. warning::
Depreciated.
"""
for output in self.outputs:
output.put_record(obj)
@property
def input_fields(self):
"""Return fields from input pipe, if there is one and only one input pipe."""
return self.input.fields
@property
def output_fields(self):
"""Return fields passed to the output by the node.
Subclasses should override this method. Default implementation returns same fields as
input has, raises exception when there are more inputs or if there is no input
connected."""
if not len(self.inputs) == 1:
raise ValueError("Can not get default list of output fields: node has more than one input"
" or no input is provided. Subclasses should override this method")
if not self.input.fields:
raise ValueError("Can not get default list of output fields: input pipe fields are not "
"initialized")
return self.input.fields
@property
def output_field_names(self):
"""Convenience method for gettin names of fields generated by the node. For more information
see :meth:`brewery.nodes.Node.output_fields`"""
raise PendingDeprecationWarning
return self.output_fields.names()
@classmethod
def identifier(cls):
"""Returns an identifier name of the node class. Identifier is used
for construction of streams from dictionaries or for any other
out-of-program constructions.
Node identifier is specified in the `node_info` dictionary as
``name``. If no explicit identifier is specified, then decamelized
class name will be used with `node` suffix removed. For example:
``CSVSourceNode`` will be ``csv_source``.
"""
logger = utils.get_logger()
# FIXME: this is temporary warning
info = get_node_info(cls)
ident = None
if info:
ident = info.get("name")
if not ident:
ident = utils.to_identifier(utils.decamelize(cls.__name__))
if ident.endswith("_node"):
ident = ident[:-5]
return ident
def configure(self, config, protected = False):
"""Configure node.
:Parameters:
* `config` - a dictionary containing node attributes as keys and values as attribute
values. Key ``type`` is ignored as it is used for node creation.
* `protected` - if set to ``True`` only non-protected attributes are set. Attempt
to set protected attribute will result in an exception. Use `protected` when you are
configuring nodes through a user interface or a custom tool. Default is ``False``: all
attributes can be set.
If key in the `config` dictionary does not refer to a node attribute specified in node
description, then it is ignored.
"""
attributes = dict((a["name"], a) for a in get_node_info(self)["attributes"])
for attribute, value in config.items():
info = attributes.get(attribute)
if not info:
continue
# raise KeyError("Unknown attribute '%s' in node %s" % (attribute, str(type(self))))
if protected and info.get("protected"):
# FIXME: use some custom exception
raise Exception("Trying to set protected attribute '%s' of node '%s'" %
(attribute, str(type(self))))
else:
setattr(self, attribute, value)
class SourceNode(Node):
"""Abstract class for all source nodes
All source nodes should provide an attribute or implement a property (``@property``) called
``output_fields``.
.. abstract_node
"""
def __init__(self):
super(SourceNode, self).__init__()
@property
def output_fields(self):
raise NotImplementedError("SourceNode subclasses should implement output_fields")
def add_input(self, pipe):
raise Exception("Should not add input pipe to a source node")
class TargetNode(Node):
"""Abstract class for all target nodes
.. abstract_node
"""
def __init__(self):
super(TargetNode, self).__init__()
self.fields = None
@property
def output_fields(self):
raise RuntimeError("Output fields asked from a target object.")
def add_output(self, pipe):
raise RuntimeError("Should not add output pipe to a target node")
|
|
from __future__ import print_function
import numpy as np
import scipy.sparse as sp
import warnings
from abc import ABCMeta, abstractmethod
from . import libsvm, liblinear
from . import libsvm_sparse
from ..base import BaseEstimator, ClassifierMixin
from ..preprocessing import LabelEncoder
from ..utils.multiclass import _ovr_decision_function
from ..utils import check_array, check_consistent_length, check_random_state
from ..utils import column_or_1d, check_X_y
from ..utils import compute_class_weight
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_is_fitted
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..exceptions import ConvergenceWarning
from ..exceptions import NotFittedError
LIBSVM_IMPL = ['c_svc', 'nu_svc', 'one_class', 'epsilon_svr', 'nu_svr']
def _one_vs_one_coef(dual_coef, n_support, support_vectors):
"""Generate primal coefficients from dual coefficients
for the one-vs-one multi-class LibSVM in the case
of a linear kernel."""
# get 1vs1 weights for all n*(n-1) classifiers.
# this is somewhat messy.
# shape of dual_coef_ is nSV * (n_classes -1)
# see docs for details
n_class = dual_coef.shape[0] + 1
# XXX we could do preallocation of coef but
# would have to take care in the sparse case
coef = []
sv_locs = np.cumsum(np.hstack([[0], n_support]))
for class1 in range(n_class):
# SVs for class1:
sv1 = support_vectors[sv_locs[class1]:sv_locs[class1 + 1], :]
for class2 in range(class1 + 1, n_class):
# SVs for class2:
sv2 = support_vectors[sv_locs[class2]:sv_locs[class2 + 1], :]
# dual coef for class1 SVs:
alpha1 = dual_coef[class2 - 1, sv_locs[class1]:sv_locs[class1 + 1]]
# dual coef for class2 SVs:
alpha2 = dual_coef[class1, sv_locs[class2]:sv_locs[class2 + 1]]
# build weight for class1 vs class2
coef.append(safe_sparse_dot(alpha1, sv1)
+ safe_sparse_dot(alpha2, sv2))
return coef
class BaseLibSVM(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for estimators that use libsvm as backing library
This implements support vector machine classification and regression.
Parameter documentation is in the derived `SVC` class.
"""
# The order of these must match the integer values in LibSVM.
# XXX These are actually the same in the dense case. Need to factor
# this out.
_sparse_kernels = ["linear", "poly", "rbf", "sigmoid", "precomputed"]
@abstractmethod
def __init__(self, kernel, degree, gamma, coef0,
tol, C, nu, epsilon, shrinking, probability, cache_size,
class_weight, verbose, max_iter, random_state):
if self._impl not in LIBSVM_IMPL: # pragma: no cover
raise ValueError("impl should be one of %s, %s was given" % (
LIBSVM_IMPL, self._impl))
if gamma == 0:
msg = ("The gamma value of 0.0 is invalid. Use 'auto' to set"
" gamma to a value of 1 / n_features.")
raise ValueError(msg)
self.kernel = kernel
self.degree = degree
self.gamma = gamma
self.coef0 = coef0
self.tol = tol
self.C = C
self.nu = nu
self.epsilon = epsilon
self.shrinking = shrinking
self.probability = probability
self.cache_size = cache_size
self.class_weight = class_weight
self.verbose = verbose
self.max_iter = max_iter
self.random_state = random_state
@property
def _pairwise(self):
# Used by cross_val_score.
return self.kernel == "precomputed"
def fit(self, X, y, sample_weight=None):
"""Fit the SVM model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
For kernel="precomputed", the expected shape of X is
(n_samples, n_samples).
y : array-like, shape (n_samples,)
Target values (class labels in classification, real numbers in
regression)
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Notes
------
If X and y are not C-ordered and contiguous arrays of np.float64 and
X is not a scipy.sparse.csr_matrix, X and/or y may be copied.
If X is a dense array, then the other methods will not support sparse
matrices as input.
"""
rnd = check_random_state(self.random_state)
sparse = sp.isspmatrix(X)
if sparse and self.kernel == "precomputed":
raise TypeError("Sparse precomputed kernels are not supported.")
self._sparse = sparse and not callable(self.kernel)
X, y = check_X_y(X, y, dtype=np.float64, order='C', accept_sparse='csr')
y = self._validate_targets(y)
sample_weight = np.asarray([]
if sample_weight is None
else sample_weight, dtype=np.float64)
solver_type = LIBSVM_IMPL.index(self._impl)
# input validation
if solver_type != 2 and X.shape[0] != y.shape[0]:
raise ValueError("X and y have incompatible shapes.\n" +
"X has %s samples, but y has %s." %
(X.shape[0], y.shape[0]))
if self.kernel == "precomputed" and X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
if sample_weight.shape[0] > 0 and sample_weight.shape[0] != X.shape[0]:
raise ValueError("sample_weight and X have incompatible shapes: "
"%r vs %r\n"
"Note: Sparse matrices cannot be indexed w/"
"boolean masks (use `indices=True` in CV)."
% (sample_weight.shape, X.shape))
if self.gamma in ('scale', 'auto_deprecated'):
if sparse:
# std = sqrt(E[X^2] - E[X]^2)
X_std = np.sqrt((X.multiply(X)).mean() - (X.mean())**2)
else:
X_std = X.std()
if self.gamma == 'scale':
if X_std != 0:
self._gamma = 1.0 / (X.shape[1] * X_std)
else:
self._gamma = 1.0
else:
kernel_uses_gamma = (not callable(self.kernel) and self.kernel
not in ('linear', 'precomputed'))
if kernel_uses_gamma and not np.isclose(X_std, 1.0):
# NOTE: when deprecation ends we need to remove explicitly
# setting `gamma` in examples (also in tests). See
# https://github.com/scikit-learn/scikit-learn/pull/10331
# for the examples/tests that need to be reverted.
warnings.warn("The default value of gamma will change "
"from 'auto' to 'scale' in version 0.22 to "
"account better for unscaled features. Set "
"gamma explicitly to 'auto' or 'scale' to "
"avoid this warning.", FutureWarning)
self._gamma = 1.0 / X.shape[1]
elif self.gamma == 'auto':
self._gamma = 1.0 / X.shape[1]
else:
self._gamma = self.gamma
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
fit = self._sparse_fit if self._sparse else self._dense_fit
if self.verbose: # pragma: no cover
print('[LibSVM]', end='')
seed = rnd.randint(np.iinfo('i').max)
fit(X, y, sample_weight, solver_type, kernel, random_seed=seed)
# see comment on the other call to np.iinfo in this file
self.shape_fit_ = X.shape
# In binary case, we need to flip the sign of coef, intercept and
# decision function. Use self._intercept_ and self._dual_coef_ internally.
self._intercept_ = self.intercept_.copy()
self._dual_coef_ = self.dual_coef_
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
self.intercept_ *= -1
self.dual_coef_ = -self.dual_coef_
return self
def _validate_targets(self, y):
"""Validation of y and class_weight.
Default implementation for SVR and one-class; overridden in BaseSVC.
"""
# XXX this is ugly.
# Regression models should not have a class_weight_ attribute.
self.class_weight_ = np.empty(0)
return column_or_1d(y, warn=True).astype(np.float64)
def _warn_from_fit_status(self):
assert self.fit_status_ in (0, 1)
if self.fit_status_ == 1:
warnings.warn('Solver terminated early (max_iter=%i).'
' Consider pre-processing your data with'
' StandardScaler or MinMaxScaler.'
% self.max_iter, ConvergenceWarning)
def _dense_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
if callable(self.kernel):
# you must store a reference to X to compute the kernel in predict
# TODO: add keyword copy to copy on demand
self.__Xfit = X
X = self._compute_kernel(X)
if X.shape[0] != X.shape[1]:
raise ValueError("X.shape[0] should be equal to X.shape[1]")
libsvm.set_verbosity_wrap(self.verbose)
# we don't pass **self.get_params() to allow subclasses to
# add other parameters to __init__
self.support_, self.support_vectors_, self.n_support_, \
self.dual_coef_, self.intercept_, self.probA_, \
self.probB_, self.fit_status_ = libsvm.fit(
X, y,
svm_type=solver_type, sample_weight=sample_weight,
class_weight=self.class_weight_, kernel=kernel, C=self.C,
nu=self.nu, probability=self.probability, degree=self.degree,
shrinking=self.shrinking, tol=self.tol,
cache_size=self.cache_size, coef0=self.coef0,
gamma=self._gamma, epsilon=self.epsilon,
max_iter=self.max_iter, random_seed=random_seed)
self._warn_from_fit_status()
def _sparse_fit(self, X, y, sample_weight, solver_type, kernel,
random_seed):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
X.sort_indices()
kernel_type = self._sparse_kernels.index(kernel)
libsvm_sparse.set_verbosity_wrap(self.verbose)
self.support_, self.support_vectors_, dual_coef_data, \
self.intercept_, self.n_support_, \
self.probA_, self.probB_, self.fit_status_ = \
libsvm_sparse.libsvm_sparse_train(
X.shape[1], X.data, X.indices, X.indptr, y, solver_type,
kernel_type, self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
sample_weight, self.nu, self.cache_size, self.epsilon,
int(self.shrinking), int(self.probability), self.max_iter,
random_seed)
self._warn_from_fit_status()
if hasattr(self, "classes_"):
n_class = len(self.classes_) - 1
else: # regression
n_class = 1
n_SV = self.support_vectors_.shape[0]
dual_coef_indices = np.tile(np.arange(n_SV), n_class)
dual_coef_indptr = np.arange(0, dual_coef_indices.size + 1,
dual_coef_indices.size // n_class)
self.dual_coef_ = sp.csr_matrix(
(dual_coef_data, dual_coef_indices, dual_coef_indptr),
(n_class, n_SV))
def predict(self, X):
"""Perform regression on samples in X.
For a one-class model, +1 (inlier) or -1 (outlier) is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
(n_samples_test, n_samples_train).
Returns
-------
y_pred : array, shape (n_samples,)
"""
X = self._validate_for_predict(X)
predict = self._sparse_predict if self._sparse else self._dense_predict
return predict(X)
def _dense_predict(self, X):
n_samples, n_features = X.shape
X = self._compute_kernel(X)
if X.ndim == 1:
X = check_array(X, order='C')
kernel = self.kernel
if callable(self.kernel):
kernel = 'precomputed'
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
svm_type = LIBSVM_IMPL.index(self._impl)
return libsvm.predict(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_, svm_type=svm_type, kernel=kernel,
degree=self.degree, coef0=self.coef0, gamma=self._gamma,
cache_size=self.cache_size)
def _sparse_predict(self, X):
# Precondition: X is a csr_matrix of dtype np.float64.
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
C = 0.0 # C is not useful here
return libsvm_sparse.libsvm_sparse_predict(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _compute_kernel(self, X):
"""Return the data transformed by a callable kernel"""
if callable(self.kernel):
# in the case of precomputed kernel given as a function, we
# have to compute explicitly the kernel matrix
kernel = self.kernel(X, self.__Xfit)
if sp.issparse(kernel):
kernel = kernel.toarray()
X = np.asarray(kernel, dtype=np.float64, order='C')
return X
def _decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_class * (n_class-1) / 2)
Returns the decision function of the sample for each class
in the model.
"""
# NOTE: _validate_for_predict contains check for is_fitted
# hence must be placed before any other attributes are used.
X = self._validate_for_predict(X)
X = self._compute_kernel(X)
if self._sparse:
dec_func = self._sparse_decision_function(X)
else:
dec_func = self._dense_decision_function(X)
# In binary case, we need to flip the sign of coef, intercept and
# decision function.
if self._impl in ['c_svc', 'nu_svc'] and len(self.classes_) == 2:
return -dec_func.ravel()
return dec_func
def _dense_decision_function(self, X):
X = check_array(X, dtype=np.float64, order="C")
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
return libsvm.decision_function(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=LIBSVM_IMPL.index(self._impl),
kernel=kernel, degree=self.degree, cache_size=self.cache_size,
coef0=self.coef0, gamma=self._gamma)
def _sparse_decision_function(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_decision_function(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _validate_for_predict(self, X):
check_is_fitted(self, 'support_')
X = check_array(X, accept_sparse='csr', dtype=np.float64, order="C")
if self._sparse and not sp.isspmatrix(X):
X = sp.csr_matrix(X)
if self._sparse:
X.sort_indices()
if sp.issparse(X) and not self._sparse and not callable(self.kernel):
raise ValueError(
"cannot use sparse input in %r trained on dense data"
% type(self).__name__)
n_samples, n_features = X.shape
if self.kernel == "precomputed":
if X.shape[1] != self.shape_fit_[0]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of samples at training time" %
(X.shape[1], self.shape_fit_[0]))
elif n_features != self.shape_fit_[1]:
raise ValueError("X.shape[1] = %d should be equal to %d, "
"the number of features at training time" %
(n_features, self.shape_fit_[1]))
return X
@property
def coef_(self):
if self.kernel != 'linear':
raise AttributeError('coef_ is only available when using a '
'linear kernel')
coef = self._get_coef()
# coef_ being a read-only property, it's better to mark the value as
# immutable to avoid hiding potential bugs for the unsuspecting user.
if sp.issparse(coef):
# sparse matrices do not have global flags
coef.data.flags.writeable = False
else:
# regular dense array
coef.flags.writeable = False
return coef
def _get_coef(self):
return safe_sparse_dot(self._dual_coef_, self.support_vectors_)
class BaseSVC(six.with_metaclass(ABCMeta, BaseLibSVM, ClassifierMixin)):
"""ABC for LibSVM-based classifiers."""
@abstractmethod
def __init__(self, kernel, degree, gamma, coef0, tol, C, nu,
shrinking, probability, cache_size, class_weight, verbose,
max_iter, decision_function_shape, random_state):
self.decision_function_shape = decision_function_shape
super(BaseSVC, self).__init__(
kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
random_state=random_state)
def _validate_targets(self, y):
y_ = column_or_1d(y, warn=True)
check_classification_targets(y)
cls, y = np.unique(y_, return_inverse=True)
self.class_weight_ = compute_class_weight(self.class_weight, cls, y_)
if len(cls) < 2:
raise ValueError(
"The number of classes has to be greater than one; got %d"
" class" % len(cls))
self.classes_ = cls
return np.asarray(y, dtype=np.float64, order='C')
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
X : array-like, shape (n_samples, n_classes * (n_classes-1) / 2)
Returns the decision function of the sample for each class
in the model.
If decision_function_shape='ovr', the shape is (n_samples,
n_classes)
"""
dec = self._decision_function(X)
if self.decision_function_shape == 'ovr' and len(self.classes_) > 2:
return _ovr_decision_function(dec < 0, -dec, len(self.classes_))
return dec
def predict(self, X):
"""Perform classification on samples in X.
For a one-class model, +1 or -1 is returned.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
y_pred : array, shape (n_samples,)
Class labels for samples in X.
"""
y = super(BaseSVC, self).predict(X)
return self.classes_.take(np.asarray(y, dtype=np.intp))
# Hacky way of getting predict_proba to raise an AttributeError when
# probability=False using properties. Do not use this in new code; when
# probabilities are not available depending on a setting, introduce two
# estimators.
def _check_proba(self):
if not self.probability:
raise AttributeError("predict_proba is not available when "
" probability=False")
if self._impl not in ('c_svc', 'nu_svc'):
raise AttributeError("predict_proba only implemented for SVC"
" and NuSVC")
@property
def predict_proba(self):
"""Compute probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with the attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the probability of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_proba
def _predict_proba(self, X):
X = self._validate_for_predict(X)
if self.probA_.size == 0 or self.probB_.size == 0:
raise NotFittedError("predict_proba is not available when fitted "
"with probability=False")
pred_proba = (self._sparse_predict_proba
if self._sparse else self._dense_predict_proba)
return pred_proba(X)
@property
def predict_log_proba(self):
"""Compute log probabilities of possible outcomes for samples in X.
The model needs to have probability information computed at training
time: fit with the attribute `probability` set to True.
Parameters
----------
X : array-like, shape (n_samples, n_features)
For kernel="precomputed", the expected shape of X is
[n_samples_test, n_samples_train]
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probabilities of the sample for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
Notes
-----
The probability model is created using cross validation, so
the results can be slightly different than those obtained by
predict. Also, it will produce meaningless results on very small
datasets.
"""
self._check_proba()
return self._predict_log_proba
def _predict_log_proba(self, X):
return np.log(self.predict_proba(X))
def _dense_predict_proba(self, X):
X = self._compute_kernel(X)
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
svm_type = LIBSVM_IMPL.index(self._impl)
pprob = libsvm.predict_proba(
X, self.support_, self.support_vectors_, self.n_support_,
self._dual_coef_, self._intercept_,
self.probA_, self.probB_,
svm_type=svm_type, kernel=kernel, degree=self.degree,
cache_size=self.cache_size, coef0=self.coef0, gamma=self._gamma)
return pprob
def _sparse_predict_proba(self, X):
X.data = np.asarray(X.data, dtype=np.float64, order='C')
kernel = self.kernel
if callable(kernel):
kernel = 'precomputed'
kernel_type = self._sparse_kernels.index(kernel)
return libsvm_sparse.libsvm_sparse_predict_proba(
X.data, X.indices, X.indptr,
self.support_vectors_.data,
self.support_vectors_.indices,
self.support_vectors_.indptr,
self._dual_coef_.data, self._intercept_,
LIBSVM_IMPL.index(self._impl), kernel_type,
self.degree, self._gamma, self.coef0, self.tol,
self.C, self.class_weight_,
self.nu, self.epsilon, self.shrinking,
self.probability, self.n_support_,
self.probA_, self.probB_)
def _get_coef(self):
if self.dual_coef_.shape[0] == 1:
# binary classifier
coef = safe_sparse_dot(self.dual_coef_, self.support_vectors_)
else:
# 1vs1 classifier
coef = _one_vs_one_coef(self.dual_coef_, self.n_support_,
self.support_vectors_)
if sp.issparse(coef[0]):
coef = sp.vstack(coef).tocsr()
else:
coef = np.vstack(coef)
return coef
def _get_liblinear_solver_type(multi_class, penalty, loss, dual):
"""Find the liblinear magic number for the solver.
This number depends on the values of the following attributes:
- multi_class
- penalty
- loss
- dual
The same number is also internally used by LibLinear to determine
which solver to use.
"""
# nested dicts containing level 1: available loss functions,
# level 2: available penalties for the given loss function,
# level 3: whether the dual solver is available for the specified
# combination of loss function and penalty
_solver_type_dict = {
'logistic_regression': {
'l1': {False: 6},
'l2': {False: 0, True: 7}},
'hinge': {
'l2': {True: 3}},
'squared_hinge': {
'l1': {False: 5},
'l2': {False: 2, True: 1}},
'epsilon_insensitive': {
'l2': {True: 13}},
'squared_epsilon_insensitive': {
'l2': {False: 11, True: 12}},
'crammer_singer': 4
}
if multi_class == 'crammer_singer':
return _solver_type_dict[multi_class]
elif multi_class != 'ovr':
raise ValueError("`multi_class` must be one of `ovr`, "
"`crammer_singer`, got %r" % multi_class)
_solver_pen = _solver_type_dict.get(loss, None)
if _solver_pen is None:
error_string = ("loss='%s' is not supported" % loss)
else:
_solver_dual = _solver_pen.get(penalty, None)
if _solver_dual is None:
error_string = ("The combination of penalty='%s' "
"and loss='%s' is not supported"
% (penalty, loss))
else:
solver_num = _solver_dual.get(dual, None)
if solver_num is None:
error_string = ("The combination of penalty='%s' and "
"loss='%s' are not supported when dual=%s"
% (penalty, loss, dual))
else:
return solver_num
raise ValueError('Unsupported set of arguments: %s, '
'Parameters: penalty=%r, loss=%r, dual=%r'
% (error_string, penalty, loss, dual))
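# --- Hedged illustration (added; not part of the original module). It shows
# how the lookup above resolves for LinearSVC-style defaults
# (multi_class='ovr', penalty='l2', loss='squared_hinge', dual=True): the
# nested table yields liblinear solver number 1. Wrapped in a helper so that
# nothing runs at import time; the function name is ours, not a scikit-learn API.
def _demo_default_solver_type():
    solver = _get_liblinear_solver_type(
        multi_class='ovr', penalty='l2', loss='squared_hinge', dual=True)
    assert solver == 1   # _solver_type_dict['squared_hinge']['l2'][True]
    return solver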
def _fit_liblinear(X, y, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol,
random_state=None, multi_class='ovr',
loss='logistic_regression', epsilon=0.1,
sample_weight=None):
"""Used by Logistic Regression (and CV) and LinearSVC.
Preprocessing is done in this function before supplying it to liblinear.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X
C : float
Inverse of regularization strength. The lower the C, the stronger
the penalization.
fit_intercept : bool
Whether or not to fit the intercept, that is, to add an intercept
term to the decision function.
intercept_scaling : float
LibLinear internally penalizes the intercept and this term is subject
to regularization just like the other terms of the feature vector.
In order to avoid this, one should increase the intercept_scaling,
such that the feature vector becomes [x, intercept_scaling].
class_weight : {dict, 'balanced'}, optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
penalty : str, {'l1', 'l2'}
The norm of the penalty used in regularization.
dual : bool
Dual or primal formulation.
verbose : int
Set verbose to any positive number for verbosity.
max_iter : int
Maximum number of iterations.
tol : float
Stopping condition.
random_state : int, RandomState instance or None, optional (default=None)
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
multi_class : str, {'ovr', 'crammer_singer'}
`ovr` trains n_classes one-vs-rest classifiers, while `crammer_singer`
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice, rarely leads to
better accuracy and is more expensive to compute.
If `crammer_singer` is chosen, the options loss, penalty and dual will
be ignored.
loss : str, {'logistic_regression', 'hinge', 'squared_hinge',
'epsilon_insensitive', 'squared_epsilon_insensitive'}
The loss function used to fit the model.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set epsilon=0.
sample_weight : array-like, optional
Weights assigned to each sample.
Returns
-------
coef_ : ndarray, shape (n_features, n_features + 1)
The coefficient vector obtained by minimizing the objective function.
intercept_ : float
The intercept term added to the vector.
n_iter_ : int
Maximum number of iterations run across all classes.
"""
if loss not in ['epsilon_insensitive', 'squared_epsilon_insensitive']:
enc = LabelEncoder()
y_ind = enc.fit_transform(y)
classes_ = enc.classes_
if len(classes_) < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
class_weight_ = compute_class_weight(class_weight, classes_, y)
else:
class_weight_ = np.empty(0, dtype=np.float64)
y_ind = y
liblinear.set_verbosity_wrap(verbose)
rnd = check_random_state(random_state)
if verbose:
print('[LibLinear]', end='')
# LinearSVC breaks when intercept_scaling is <= 0
bias = -1.0
if fit_intercept:
if intercept_scaling <= 0:
raise ValueError("Intercept scaling is %r but needs to be greater than 0."
" To disable fitting an intercept,"
" set fit_intercept=False." % intercept_scaling)
else:
bias = intercept_scaling
libsvm.set_verbosity_wrap(verbose)
libsvm_sparse.set_verbosity_wrap(verbose)
liblinear.set_verbosity_wrap(verbose)
# LibLinear wants targets as doubles, even for classification
y_ind = np.asarray(y_ind, dtype=np.float64).ravel()
if sample_weight is None:
sample_weight = np.ones(X.shape[0])
else:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(sample_weight, X)
solver_type = _get_liblinear_solver_type(multi_class, penalty, loss, dual)
raw_coef_, n_iter_ = liblinear.train_wrap(
X, y_ind, sp.isspmatrix(X), solver_type, tol, bias, C,
class_weight_, max_iter, rnd.randint(np.iinfo('i').max),
epsilon, sample_weight)
# Regarding rnd.randint(..) in the above signature:
# seed for srand in range [0..INT_MAX); due to limitations in Numpy
# on 32-bit platforms, we can't get to the UINT_MAX limit that
# srand supports
n_iter_ = max(n_iter_)
if n_iter_ >= max_iter:
warnings.warn("Liblinear failed to converge, increase "
"the number of iterations.", ConvergenceWarning)
if fit_intercept:
coef_ = raw_coef_[:, :-1]
intercept_ = intercept_scaling * raw_coef_[:, -1]
else:
coef_ = raw_coef_
intercept_ = 0.
return coef_, intercept_, n_iter_
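# --- Hedged usage sketch (added; not part of the original module). Calling
# this helper checks, through the public scikit-learn API, the linear-kernel
# identity implemented by _get_coef() above: for a binary SVC with
# kernel='linear', coef_ == dual_coef_ @ support_vectors_. The function name
# and the toy data are ours (illustrative only); nothing runs at import time.
def _demo_linear_coef_identity():
    from sklearn.svm import SVC  # deferred import to avoid import cycles

    rng = np.random.RandomState(0)
    X_demo = rng.randn(40, 3)
    y_demo = (X_demo[:, 0] + 0.5 * X_demo[:, 1] > 0).astype(int)

    clf = SVC(kernel="linear", C=1.0).fit(X_demo, y_demo)
    reconstructed = safe_sparse_dot(clf.dual_coef_, clf.support_vectors_)
    return np.allclose(clf.coef_, reconstructed)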
|
|
import sqlite3
class BurneyDB(object):
class MissingLinkedIDs(Exception):
pass
def __init__(self, dbfile = "burney.db", json_archive = "/datastore/burneyjson", areas_archive = "/datastore/burneyareas"):
self._conn = sqlite3.connect(dbfile)
# Dict responses:
self._conn.row_factory = sqlite3.Row
self._cur = self._conn.cursor()
self._title_keys = ["id", "title", "titleAbbreviation", "titleContinues", "titleContinuedBy", "placeOfPublication", "datesOfPublication", "typeOfPublication", "earliest_issue", "last_issue"]
self._issue_keys = ["id", "volumeNumber", "issueNumber", "printedDate", "normalisedDate", "pageCount", "ESTC", "title_id"]
self._pages_keys = ["id", "number_of_articles", "day", "month", "year", "filepath", "title_id", "issue_id"]
self._corrupt_keys = ["id", "filepath", "day", "month", "year", "newspaper", "xmlfile"]
self.create_scanned_cache()
def create_scanned_cache(self):
self._cached_scanned = [x[0] for x in self._cur.execute("""SELECT filepath FROM pages;""").fetchall()]
self._cached_scanned += [x[0] for x in self._cur.execute("""SELECT filepath FROM corrupt;""").fetchall()]
def create_tables(self, sure_about_this = False):
if sure_about_this:
self._cur.executescript("""
DROP TABLE IF EXISTS title_metadata;
CREATE TABLE title_metadata(id INTEGER PRIMARY KEY,
title TEXT,
titleAbbreviation TEXT,
titleContinues TEXT,
titleContinuedBy TEXT,
placeOfPublication TEXT,
datesOfPublication TEXT,
typeOfPublication TEXT,
earliest_issue TEXT,
last_issue TEXT);
DROP TABLE IF EXISTS issue_metadata;
CREATE TABLE issue_metadata(id INTEGER PRIMARY KEY,
volumeNumber TEXT,
issueNumber TEXT,
printedDate TEXT,
normalisedDate TEXT,
pageCount TEXT,
ESTC TEXT,
title_id INTEGER,
FOREIGN KEY(title_id) REFERENCES title_metadata(id)
);
DROP TABLE IF EXISTS pages;
CREATE TABLE pages(id INTEGER PRIMARY KEY,
number_of_articles INTEGER,
day TEXT,
month TEXT,
year TEXT,
filepath TEXT,
title_id INTEGER,
issue_id INTEGER,
FOREIGN KEY(title_id) REFERENCES title_metadata(id),
FOREIGN KEY(issue_id) REFERENCES issue_metadata(id)
);
DROP TABLE IF EXISTS corrupt;
CREATE TABLE corrupt(xmlfile TEXT UNIQUE,
id INTEGER PRIMARY KEY,
filepath TEXT,
day TEXT,
month TEXT,
year TEXT,
newspaper TEXT);
""")
self._conn.commit()
def commit(self):
return self._conn.commit()
def close(self):
return self._conn.close()
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
self._conn.commit()
self._conn.close()
def get_title_row(self, **kwds):
where_filters = ['{0} = "{1}"'.format(k,v) for k,v in kwds.items() if k in self._title_keys]
self._cur.execute("""SELECT {0} FROM title_metadata WHERE {1}
LIMIT 1;""".format(",".join(self._title_keys), " AND ".join(where_filters)))
resp = self._cur.fetchone()
if resp != None:
return dict(resp)
def add_title_row(self, md):
# does it exist already? match titleAbbreviation, the title and the date range.
# create new if no match is found.
for item in self._title_keys[1:]: # everything but the id column
if item not in md:
md[item] = ""
self._cur.execute("""SELECT id FROM title_metadata WHERE
titleAbbreviation="{titleAbbreviation}" AND
title="{title}" AND
datesOfPublication="{datesOfPublication}"
LIMIT 1;""".format(**md))
resp = self._cur.fetchone()
if resp == None:
# create new record
self._cur.execute("""INSERT INTO title_metadata(title, titleAbbreviation, titleContinues, titleContinuedBy, placeOfPublication, datesOfPublication, typeOfPublication) VALUES(:title, :titleAbbreviation, :titleContinues, :titleContinuedBy, :placeOfPublication, :datesOfPublication, :typeOfPublication);""", md)
id = self._cur.lastrowid
else:
id = resp[0]
return id
def get_issue_row(self, **kwds):
where_filters = ['{0} = "{1}"'.format(k,v) for k,v in kwds.items() if k in self._issue_keys]
self._cur.execute("""SELECT {0} FROM issue_metadata WHERE {1}
LIMIT 1;""".format(",".join(self._issue_keys), " AND ".join(where_filters)))
resp = self._cur.fetchone()
if resp != None:
return dict(resp)
def get_entry_row(self, **kwds):
where_filters = ['{0} = "{1}"'.format(k,v) for k,v in kwds.items() if k in self._pages_keys]
self._cur.execute("""SELECT {0} FROM pages WHERE {1}
LIMIT 1;""".format(",".join(self._pages_keys), " AND ".join(where_filters)))
resp = self._cur.fetchone()
if resp != None:
return dict(resp)
def add_issue_row(self, md):
# does it exist already? match ESTC, issueNumber, normalisedDate and title_id.
# create new if no match is found.
for item in self._issue_keys[1:]: # everything but the id column
if item not in md:
md[item] = ""
if not md['title_id']:
raise self.MissingLinkedIDs
self._cur.execute("""SELECT id FROM issue_metadata WHERE
ESTC="{ESTC}" AND
issueNumber="{issueNumber}" AND
normalisedDate="{normalisedDate}" AND
title_id={title_id}
LIMIT 1;""".format(**md))
resp = self._cur.fetchone()
if resp == None:
# create new record
self._cur.execute("""INSERT INTO issue_metadata(volumeNumber, issueNumber, printedDate,normalisedDate,pageCount,ESTC,title_id) VALUES(:volumeNumber, :issueNumber, :printedDate,:normalisedDate,:pageCount,:ESTC,:title_id);""", md)
id = self._cur.lastrowid
else:
id = resp[0]
return id
def add_service_dir(self, md):
for item in self._pages_keys[1:]:
if item not in md:
md[item] = ""
if not md['title_id'] or not md['issue_id']:
raise self.MissingLinkedIDs
self._cur.execute("""INSERT INTO pages(number_of_articles, day, month, year, filepath, title_id, issue_id) VALUES (:number_of_articles, :day, :month, :year, :filepath, :title_id, :issue_id)""", md)
id = self._cur.lastrowid
return id
def list_all_newspapers(self):
self._cur.execute("""SELECT * from title_metadata;""")
for md in self._cur.fetchall():
yield dict(md)
def list_all_issues(self):
self._cur.execute("""SELECT * from issue_metadata;""")
for md in self._cur.fetchall():
yield dict(md)
def list_all_entries(self, titleAbbreviation = None, title_id = None, issue_id = None, **kwds):
if titleAbbreviation != None:
t_md = self.get_title_row(titleAbbreviation = titleAbbreviation)
if t_md != None:
title_id = t_md['id']
where_filters = []
if title_id != None:
where_filters.append('title_id = "{0}"'.format(title_id))
if issue_id != None:
where_filters.append('issue_id = "{0}"'.format(issue_id))
for key in kwds:
if key in self._pages_keys:
where_filters.append('{0} = "{1}"'.format(key, kwds[key]))
if where_filters != []:
self._cur.execute("""SELECT {0} from pages WHERE {1};""".format(",".join(self._pages_keys), " AND ".join(where_filters)))
else:
self._cur.execute("""SELECT {0} from pages;""".format(",".join(self._pages_keys)))
for md in self._cur.fetchall():
yield dict(md)
def scanned(self, filepath, skipping=False):
if skipping:
# only hit the cache:
return filepath in self._cached_scanned
self._cur.execute("""SELECT id FROM pages WHERE filepath="{0}";""".format(filepath))
if self._cur.fetchone():
return True
# not there. Is it corrupt then?
self._cur.execute("""SELECT id FROM corrupt WHERE filepath="{0}";""".format(filepath))
if self._cur.fetchone():
return True
return False
def mark_corrupt(self, md):
for item in self._corrupt_keys[1:]:
if item not in md:
md[item] = ""
self._cur.execute("""INSERT OR IGNORE INTO corrupt(filepath, day, month, year, newspaper, xmlfile)
VALUES (:filepath, :day, :month, :year, :newspaper, :xmlfile);""", md)
id = self._cur.lastrowid
return id
def list_corrupt_files(self, title_id = None):
if title_id == None:
self._cur.execute("""SELECT * from corrupt;""")
else:
t_md = self.get_title_row(id = title_id)
self._cur.execute("""SELECT * from corrupt WHERE newspaper = "{0}";""".format(t_md['titleAbbreviation']))
return self._cur.fetchall()
def update_title_row(self, **kwds):
if 'id' not in kwds and 'titleAbbreviation' not in kwds:
print("Cannot update title information without some key identifier.")
return
else:
data_line = ",".join(['{0}="{{{0}}}"'.format(x) for x in kwds.keys() if x not in ['id', 'titleAbbreviation'] and x in self._title_keys])
wheres = []
if 'id' in kwds:
wheres.append("id={id}")
if 'titleAbbreviation' in kwds:
wheres.append("titleAbbreviation=:titleAbbreviation")
where_line = " AND ".join(wheres)
q = "UPDATE title_metadata SET " + data_line + " WHERE " + where_line + ";"
self._cur.execute(q.format(**kwds))
def update_issue_row(self, **kwds):
if 'id' not in kwds:
print("Cannot update issue information without some key identifier.")
return
else:
data_line = ",".join(['{0}="{{{0}}}"'.format(x) for x in kwds.keys() if x != "id" and x in self._issue_keys])
q = "UPDATE issue_metadata SET " + data_line + " WHERE id={id};"
self._cur.execute(q.format(**kwds))
def _update_newspaper(self, id):
from datetime import datetime
dates = []
printed_dates = {}
for entry in self.list_all_entries(title_id = id):
try:
if entry['month'] == "00" or entry['month'] == "0":
dates.append(datetime(int(entry['year']),1,1))
elif entry['day'] == "00" or entry['day'] == "0":
dates.append(datetime(int(entry['year']), int(entry['month']),1))
else:
dates.append(datetime(int(entry['year']), int(entry['month']), int(entry['day'])))
printed_dates[dates[-1]] = "{year}-{month}-{day}".format(**dict(entry))
except ValueError as e:
print("Couldn't form date from:")
print(dict(entry))
dates.sort()
self.update_title_row(id = id, earliest_issue = printed_dates[dates[0]],
last_issue = printed_dates[dates[-1]])
return printed_dates[dates[0]], printed_dates[dates[-1]]
def regenerate_earliest_latest_records(self):
# Update the convenience columns in 'title_metadata' to reflect the earliest and latest
# issue we have scanned for a given newspaper run
for md in list(self.list_all_newspapers()):
e,l = self._update_newspaper(md['id'])
print("Updating {0} - id={1} with {2} -> {3}".format(md['title'], md['id'], e, l))
def export_newspaper_data(self, filename):
import csv
with open(filename, "w") as co:
doc = csv.DictWriter(co, fieldnames = self._title_keys)
doc.writeheader()
for md in self.list_all_newspapers():
doc.writerow(dict(md))
if __name__ == "__main__":
# load the backup db as 'db'
print("Loading 'burney.db.devuse' as db. (Won't have full corrupt file logs but has up to date metadata/filepaths.)")
db = BurneyDB("burney.db.devuse")
|
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `tf.data.Dataset.shuffle()`."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from absl.testing import parameterized
import numpy as np
from tensorflow.python.data.experimental.ops import iterator_ops as contrib_iterator_ops
from tensorflow.python.data.kernel_tests import checkpoint_test_base
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import function
from tensorflow.python.framework import combinations
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.tracking import util as trackable_utils
class ShuffleTest(test_base.DatasetTestBase, parameterized.TestCase):
@combinations.generate(test_base.default_test_combinations())
def testBasic(self):
components = (
np.array([1, 2, 3, 4]), np.array([5, 6, 7, 8]),
np.array([9.0, 10.0, 11.0, 12.0])
)
def dataset_fn(count=5, buffer_size=None, seed=0):
repeat_dataset = (
dataset_ops.Dataset.from_tensor_slices(components).repeat(count))
if buffer_size:
shuffle_dataset = repeat_dataset.shuffle(buffer_size, seed)
self.assertEqual(
tuple([c.shape[1:] for c in components]),
dataset_ops.get_legacy_output_shapes(shuffle_dataset))
return shuffle_dataset
else:
return repeat_dataset
# First run without shuffling to collect the "ground truth".
get_next = self.getNext(dataset_fn())
unshuffled_elements = []
for _ in range(20):
unshuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
# Assert that the shuffled dataset has the same elements as the
# "ground truth".
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
shuffled_elements = []
for _ in range(20):
shuffled_elements.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(sorted(unshuffled_elements), sorted(shuffled_elements))
# Assert that shuffling twice with the same seeds gives the same sequence.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=37))
reshuffled_elements_same_seed = []
for _ in range(20):
reshuffled_elements_same_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertEqual(shuffled_elements, reshuffled_elements_same_seed)
# Assert that shuffling twice with a different seed gives a different
# permutation of the same elements.
get_next = self.getNext(dataset_fn(buffer_size=100, seed=137))
reshuffled_elements_different_seed = []
for _ in range(20):
reshuffled_elements_different_seed.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertNotEqual(shuffled_elements, reshuffled_elements_different_seed)
self.assertAllEqual(
sorted(shuffled_elements), sorted(reshuffled_elements_different_seed))
# Assert that the shuffled dataset has the same elements as the
# "ground truth" when the buffer size is smaller than the input
# dataset.
get_next = self.getNext(dataset_fn(buffer_size=2, seed=37))
reshuffled_elements_small_buffer = []
for _ in range(20):
reshuffled_elements_small_buffer.append(self.evaluate(get_next()))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
self.assertAllEqual(
sorted(unshuffled_elements), sorted(reshuffled_elements_small_buffer))
# Test the case of shuffling an empty dataset.
get_next = self.getNext(dataset_fn(count=0, buffer_size=100, seed=37))
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(combinations.combine(tf_api_version=1, mode="graph"))
def testSeedZero(self):
"""Test for same behavior when the seed is a Python or Tensor zero."""
iterator = dataset_ops.make_one_shot_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=0))
get_next = iterator.get_next()
elems = []
with self.cached_session() as sess:
for _ in range(10):
elems.append(sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
seed_placeholder = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.make_initializable_iterator(
dataset_ops.Dataset.range(10).shuffle(10, seed=seed_placeholder))
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(iterator.initializer, feed_dict={seed_placeholder: 0})
for elem in elems:
self.assertEqual(elem, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
@combinations.generate(test_base.default_test_combinations())
def testDefaultArguments(self):
components = [0, 1, 2, 3, 4]
dataset = dataset_ops.Dataset.from_tensor_slices(components).shuffle(
5).repeat()
get_next = self.getNext(dataset)
counts = collections.defaultdict(lambda: 0)
for _ in range(10):
for _ in range(5):
counts[self.evaluate(get_next())] += 1
for i in range(5):
self.assertEqual(10, counts[i])
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(reshuffle=[True, False]),
combinations.combine(graph_seed=38, op_seed=None) +
combinations.combine(graph_seed=None, op_seed=42) +
combinations.combine(graph_seed=38, op_seed=42)))
def testShuffleSeed(self, reshuffle, graph_seed, op_seed):
results = []
for _ in range(2):
with ops.Graph().as_default() as g:
random_seed.set_random_seed(graph_seed)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=op_seed, reshuffle_each_iteration=reshuffle).repeat(3)
iterator = dataset_ops.make_one_shot_iterator(dataset)
next_element = iterator.get_next()
run_results = []
with self.session(graph=g) as sess:
for _ in range(30):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertAllEqual(results[0], results[1])
# TODO(b/117581999): enable this test for eager-mode.
@combinations.generate(
combinations.times(
test_base.graph_only_combinations(),
combinations.combine(
reshuffle=[True, False], initializable=[True, False])))
def testMultipleIterators(self, reshuffle, initializable):
with ops.Graph().as_default() as g:
dataset = dataset_ops.Dataset.range(100).shuffle(
10, reshuffle_each_iteration=reshuffle).repeat(3)
if initializable:
iterators = [dataset_ops.make_initializable_iterator(dataset)
for _ in range(2)]
else:
iterators = [dataset_ops.make_one_shot_iterator(dataset)
for _ in range(2)]
results = []
with self.session(graph=g) as sess:
for iterator in iterators:
if initializable:
sess.run(iterator.initializer)
next_element = iterator.get_next()
run_results = []
for _ in range(300):
run_results.append(sess.run(next_element))
with self.assertRaises(errors.OutOfRangeError):
sess.run(next_element)
results.append(run_results)
self.assertNotEqual(results[0], results[1])
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleRepeatEpochs(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle).repeat(2)
next_element = self.getNext(dataset)
first_epoch = []
for _ in range(10):
first_epoch.append(self.evaluate(next_element()))
second_epoch = []
for _ in range(10):
second_epoch.append(self.evaluate(next_element()))
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(
combinations.times(
combinations.combine(tf_api_version=2, mode="eager"),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleIterationEpochs(self, reshuffle, seed):
# TensorFlow unit tests set the global graph seed. We unset it here so that
# we can control determinism via the `seed` parameter.
random_seed.set_random_seed(None)
dataset = dataset_ops.Dataset.range(10).shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle)
first_epoch = self.getDatasetOutput(dataset)
second_epoch = self.getDatasetOutput(dataset)
self.assertEqual(first_epoch == second_epoch, not reshuffle)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2ResourceCapture(self):
def make_dataset():
ids = dataset_ops.Dataset.range(10)
ids = ids.shuffle(1)
def interleave_fn(dataset, _):
return dataset
dataset = dataset_ops.Dataset.range(1)
dataset = dataset.interleave(functools.partial(interleave_fn, ids))
return dataset
results = []
for elem in make_dataset():
results.append(elem.numpy())
self.assertAllEqual(results, range(10))
@combinations.generate(
combinations.times(
test_base.eager_only_combinations(),
combinations.combine(reshuffle=[True, False], seed=[None, 42])))
def testReshuffleSeparateTransformations(self, reshuffle, seed):
dataset = dataset_ops.Dataset.range(10)
first_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
first_epoch.append(elem.numpy())
second_epoch = []
for elem in dataset.shuffle(
10, seed=seed, reshuffle_each_iteration=reshuffle):
second_epoch.append(elem.numpy())
self.assertEqual(first_epoch != second_epoch, seed is None)
@combinations.generate(combinations.combine(tf_api_version=2, mode="eager"))
def testShuffleV2InFunction(self):
counter_var = variables.Variable(0)
@function.defun
def consume():
ds = dataset_ops.Dataset.range(10)
ds = ds.shuffle(1)
for _ in ds:
counter_var.assign(counter_var + 1)
consume()
self.assertAllEqual(self.evaluate(counter_var), 10)
@combinations.generate(test_base.default_test_combinations())
def testEmptyDataset(self):
dataset = dataset_ops.Dataset.from_tensors(1)
def map_fn(x):
with ops.control_dependencies([check_ops.assert_equal(x, 0)]):
return x
dataset = dataset.map(map_fn)
dataset = dataset.cache()
dataset = dataset.shuffle(buffer_size=10).repeat()
get_next = self.getNext(dataset)
# First time around, we get an error for the failed assertion.
with self.assertRaises(errors.InvalidArgumentError):
self.evaluate(get_next())
# Second time around, we get an EOF because the cached dataset is empty.
with self.assertRaises(errors.OutOfRangeError):
self.evaluate(get_next())
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(reshuffle=[True, False])))
def testRerandomizeOnReplicate(self, reshuffle):
random_seed.set_random_seed(None)
# When no seeds are fixed, each instantiation of the shuffle dataset should
# produce elements in a different order.
num_elements = 100
dataset = dataset_ops.Dataset.range(num_elements)
dataset = dataset.shuffle(num_elements, reshuffle_each_iteration=reshuffle)
shuffle_1 = self.getDatasetOutput(dataset)
dataset = self.graphRoundTrip(dataset, allow_stateful=True)
shuffle_2 = self.getDatasetOutput(dataset)
self.assertCountEqual(shuffle_1, shuffle_2)
self.assertNotEqual(shuffle_1, shuffle_2)
@combinations.generate(test_base.eager_only_combinations())
def testCheckpointLargeShuffleBuffer(self):
# Tensor of size 100M
dataset = dataset_ops.Dataset.from_tensors(
array_ops.ones((25, 1000, 1000), dtype=dtypes.float32))
dataset = dataset.repeat()
# Shuffle 25 tensors to exceed the 2GB protocol buffer limit
dataset = dataset.shuffle(25)
iterator = iter(dataset)
next(iterator) # request an element to fill the shuffle buffer
ckpt = trackable_utils.Checkpoint(iterator=iterator)
manager = checkpoint_management.CheckpointManager(
ckpt, self.get_temp_dir(), max_to_keep=1)
manager.save()
ckpt.restore(manager.latest_checkpoint)
class ShuffleCheckpointTest(checkpoint_test_base.CheckpointTestBase,
parameterized.TestCase):
def _build_shuffle_dataset(
self,
range_limit=10,
num_repeats=5,
buffer_size=5,
seed=None,
reshuffle_each_iteration=None,
):
return dataset_ops.Dataset.range(range_limit).shuffle(
buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration).repeat(num_repeats)
@combinations.generate(
combinations.times(
test_base.default_test_combinations(),
combinations.combine(
reshuffle_each_iteration=[True, False],
buffer_size=[1, 3, 5, 8, 10])))
def testShuffleCore(self, reshuffle_each_iteration, buffer_size):
seed = 55
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
# pylint: disable=g-long-lambda
self.run_core_tests(
lambda: self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=seed,
reshuffle_each_iteration=reshuffle_each_iteration), num_outputs)
@combinations.generate(
combinations.combine(
tf_api_version=1,
mode=["graph"],
reshuffle_each_iteration=[True, False],
buffer_size=[1, 3, 5, 8, 10]))
def testMultipleIterators(self, reshuffle_each_iteration, buffer_size):
range_limit = 5
num_repeats = 2
num_outputs = range_limit * num_repeats
def ds_fn():
# pylint: disable=cell-var-from-loop
return self._build_shuffle_dataset(
range_limit=range_limit,
num_repeats=num_repeats,
buffer_size=buffer_size,
seed=None, # Iterator seeds are generated non-deterministically.
reshuffle_each_iteration=reshuffle_each_iteration)
# pylint: enable=cell-var-from-loop
with ops.Graph().as_default() as g:
ds = ds_fn()
iterators = [ds.make_one_shot_iterator(), ds.make_one_shot_iterator()]
get_next_ops = [it.get_next() for it in iterators]
saveables = [
contrib_iterator_ops.make_saveable_from_iterator(it)
for it in iterators
]
for saveable in saveables:
ops.add_to_collection(ops.GraphKeys.SAVEABLE_OBJECTS, saveable)
saver = saver_lib.Saver(allow_empty=True)
with self.session(graph=g) as sess:
self._save(sess, saver)
expected = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self._restore(saver, sess)
actual = [self.evaluate(get_next_ops) for _ in range(num_outputs)]
self.match(expected, actual)
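# --- Hedged illustration (added; not part of the original test file). A
# standalone sketch of the reshuffle_each_iteration behaviour exercised by
# the tests above, using the same dataset_ops API; assumes TF 2.x eager
# execution. Nothing runs at import time.
def _demo_reshuffle_each_iteration():
    ds = dataset_ops.Dataset.range(10).shuffle(
        10, seed=42, reshuffle_each_iteration=True)
    first_pass = [int(x.numpy()) for x in ds]
    second_pass = [int(x.numpy()) for x in ds]
    # Both passes contain the same elements, but with
    # reshuffle_each_iteration=True their order is expected to differ.
    return first_pass, second_pass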
if __name__ == "__main__":
test.main()
|