| repo_name | path | copies | size | content | license |
|---|---|---|---|---|---|
| stringlengths 5-100 | stringlengths 4-375 | stringclasses, 991 values | stringlengths 4-7 | stringlengths 666-1M | stringclasses, 15 values |

ebber/noteCreationServer | flask/lib/python3.5/site-packages/setuptools/command/rotate.py | copies: 461 | size: 2038

from distutils.util import convert_path
from distutils import log
from distutils.errors import DistutilsOptionError
import os
from setuptools import Command
from setuptools.compat import basestring
class rotate(Command):
"""Delete older distributions"""
description = "delete older distributions, keeping N newest files"
user_options = [
('match=', 'm', "patterns to match (required)"),
('dist-dir=', 'd', "directory where the distributions are"),
('keep=', 'k', "number of matching distributions to keep"),
]
boolean_options = []
def initialize_options(self):
self.match = None
self.dist_dir = None
self.keep = None
def finalize_options(self):
if self.match is None:
raise DistutilsOptionError(
"Must specify one or more (comma-separated) match patterns "
"(e.g. '.zip' or '.egg')"
)
if self.keep is None:
raise DistutilsOptionError("Must specify number of files to keep")
try:
self.keep = int(self.keep)
except ValueError:
raise DistutilsOptionError("--keep must be an integer")
if isinstance(self.match, basestring):
self.match = [
convert_path(p.strip()) for p in self.match.split(',')
]
self.set_undefined_options('bdist', ('dist_dir', 'dist_dir'))
def run(self):
self.run_command("egg_info")
from glob import glob
for pattern in self.match:
pattern = self.distribution.get_name() + '*' + pattern
files = glob(os.path.join(self.dist_dir, pattern))
files = [(os.path.getmtime(f), f) for f in files]
files.sort()
files.reverse()
log.info("%d file(s) matching %s", len(files), pattern)
files = files[self.keep:]
for (t, f) in files:
log.info("Deleting %s", f)
if not self.dry_run:
os.unlink(f)
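
Given the options defined above, the command is typically invoked as `python setup.py rotate --match=.egg --keep=2`. Below is a hedged, standalone sketch of the same keep-the-N-newest logic; the `dist/` directory and `*.egg` pattern are illustrative assumptions, not part of the original file.

```python
# Illustration only: mimics rotate's "keep the N newest matching files" step.
# The dist/ directory and *.egg pattern are assumed example values.
import glob
import os

def rotate_dist(dist_dir="dist", pattern="*.egg", keep=2, dry_run=True):
    files = sorted(glob.glob(os.path.join(dist_dir, pattern)),
                   key=os.path.getmtime, reverse=True)  # newest first
    for path in files[keep:]:  # everything older than the newest `keep`
        print("Deleting %s" % path)
        if not dry_run:
            os.unlink(path)

rotate_dist()
```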
license: gpl-2.0

bjzhang/xen | tools/python/genwrap.py | copies: 49 | size: 11753

#!/usr/bin/python
import sys,os
import idl
(TYPE_DEFBOOL, TYPE_BOOL, TYPE_INT, TYPE_UINT, TYPE_STRING, TYPE_ARRAY, TYPE_AGGREGATE) = range(7)
def py_type(ty):
if ty == idl.bool:
return TYPE_BOOL
if ty.typename == "libxl_defbool":
return TYPE_DEFBOOL
if isinstance(ty, idl.Enumeration):
return TYPE_UINT
if isinstance(ty, idl.Number):
if ty.signed:
return TYPE_INT
else:
return TYPE_UINT
if isinstance(ty, idl.Array):
return TYPE_ARRAY
if isinstance(ty, idl.Aggregate):
return TYPE_AGGREGATE
if ty == idl.string:
return TYPE_STRING
return None
def py_wrapstruct(ty):
l = []
l.append('typedef struct {')
l.append(' PyObject_HEAD;')
l.append(' %s obj;'%ty.typename);
l.append('}Py_%s;'%ty.rawname)
l.append('')
return "\n".join(l) + "\n"
def fsanitize(name):
"Sanitise a function name given a C type"
ret = '_'.join(name.split())
return ret.replace('*', 'ptr')
def py_decls(ty):
l = []
if isinstance(ty, idl.Aggregate):
l.append('_hidden Py_%s *Py%s_New(void);\n'%(ty.rawname, ty.rawname))
l.append('_hidden int Py%s_Check(PyObject *self);\n'%ty.rawname)
for f in ty.fields:
if py_type(f.type) is not None:
continue
if py_type(f.type) == TYPE_DEFBOOL:
continue
if ty.marshal_out():
l.append('_hidden PyObject *attrib__%s_get(%s *%s);'%(\
fsanitize(f.type.typename), f.type.typename, f.name))
if ty.marshal_in():
l.append('_hidden int attrib__%s_set(PyObject *v, %s *%s);'%(\
fsanitize(f.type.typename), f.type.typename, f.name))
return '\n'.join(l) + "\n"
def py_attrib_get(ty, f):
t = py_type(f.type)
l = []
l.append('static PyObject *py_%s_%s_get(Py_%s *self, void *priv)'%(ty.rawname, f.name, ty.rawname))
l.append('{')
if t == TYPE_BOOL:
l.append(' PyObject *ret;')
l.append(' ret = (self->obj.%s) ? Py_True : Py_False;'%f.name)
l.append(' Py_INCREF(ret);')
l.append(' return ret;')
elif t == TYPE_DEFBOOL:
l.append(' return genwrap__defbool_get(&self->obj.%s);'%f.name)
elif t == TYPE_INT:
l.append(' return genwrap__ll_get(self->obj.%s);'%f.name)
elif t == TYPE_UINT:
l.append(' return genwrap__ull_get(self->obj.%s);'%f.name)
elif t == TYPE_STRING:
l.append(' return genwrap__string_get(&self->obj.%s);'%f.name)
elif t == TYPE_AGGREGATE or t == TYPE_ARRAY:
l.append(' PyErr_SetString(PyExc_NotImplementedError, "Getting %s");'%ty.typename)
l.append(' return NULL;')
else:
tn = f.type.typename
l.append(' return attrib__%s_get((%s *)&self->obj.%s);'%(fsanitize(tn), tn, f.name))
l.append('}')
return '\n'.join(l) + "\n\n"
def py_attrib_set(ty, f):
t = py_type(f.type)
l = []
l.append('static int py_%s_%s_set(Py_%s *self, PyObject *v, void *priv)'%(ty.rawname, f.name, ty.rawname))
l.append('{')
if t == TYPE_BOOL:
l.append(' self->obj.%s = (NULL == v || Py_None == v || Py_False == v) ? 0 : 1;'%f.name)
l.append(' return 0;')
elif t == TYPE_DEFBOOL:
l.append(' return genwrap__defbool_set(v, &self->obj.%s);'%f.name)
elif t == TYPE_UINT or t == TYPE_INT:
l.append(' %slong long tmp;'%(t == TYPE_UINT and 'unsigned ' or ''))
l.append(' int ret;')
if t == TYPE_UINT:
l.append(' ret = genwrap__ull_set(v, &tmp, (%s)~0);'%f.type.typename)
else:
l.append(' ret = genwrap__ll_set(v, &tmp, (%s)~0);'%f.type.typename)
l.append(' if ( ret >= 0 )')
l.append(' self->obj.%s = tmp;'%f.name)
l.append(' return ret;')
elif t == TYPE_STRING:
l.append(' return genwrap__string_set(v, &self->obj.%s);'%f.name)
elif t == TYPE_AGGREGATE or t == TYPE_ARRAY:
l.append(' PyErr_SetString(PyExc_NotImplementedError, "Setting %s");'%ty.typename)
l.append(' return -1;')
else:
tn = f.type.typename
l.append(' return attrib__%s_set(v, (%s *)&self->obj.%s);'%(fsanitize(tn), tn, f.name))
l.append('}')
return '\n'.join(l) + "\n\n"
def py_object_def(ty):
l = []
if ty.dispose_fn is not None:
dtor = ' %s(&self->obj);\n'%ty.dispose_fn
else:
dtor = ''
funcs="""static void Py%(rawname)s_dealloc(Py_%(rawname)s *self)
{
%(dtor)s self->ob_type->tp_free((PyObject *)self);
}
static int Py%(rawname)s_init(Py_%(rawname)s *self, PyObject *args, PyObject *kwds)
{
memset(&self->obj, 0, sizeof(self->obj));
return genwrap__obj_init((PyObject *)self, args, kwds);
}
static PyObject *Py%(rawname)s_new(PyTypeObject *type, PyObject *args, PyObject *kwds)
{
Py_%(rawname)s *self = (Py_%(rawname)s *)type->tp_alloc(type, 0);
if (self == NULL)
return NULL;
memset(&self->obj, 0, sizeof(self->obj));
return (PyObject *)self;
}
"""%{'rawname': ty.rawname, 'dtor': dtor}
l.append('static PyGetSetDef Py%s_getset[] = {'%ty.rawname)
for f in ty.fields:
if f.type.private:
continue
l.append(' { .name = "%s", '%f.name)
if ty.marshal_out():
l.append(' .get = (getter)py_%s_%s_get, '%(ty.rawname, f.name))
else:
l.append(' .get = (getter)NULL, ')
if ty.marshal_in():
l.append(' .set = (setter)py_%s_%s_set,'%(ty.rawname, f.name))
else:
l.append(' .set = (setter)NULL,')
l.append(' },')
l.append(' { .name = NULL }')
l.append('};')
struct="""
static PyTypeObject Py%s_Type= {
PyObject_HEAD_INIT(NULL)
0,
PKG ".%s",
sizeof(Py_%s),
0,
(destructor)Py%s_dealloc, /* tp_dealloc */
NULL, /* tp_print */
NULL, /* tp_getattr */
NULL, /* tp_setattr */
NULL, /* tp_compare */
NULL, /* tp_repr */
NULL, /* tp_as_number */
NULL, /* tp_as_sequence */
NULL, /* tp_as_mapping */
NULL, /* tp_hash */
NULL, /* tp_call */
NULL, /* tp_str */
NULL, /* tp_getattro */
NULL, /* tp_setattro */
NULL, /* tp_as_buffer */
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_BASETYPE, /* tp_flags */
"%s", /* tp_doc */
NULL, /* tp_traverse */
NULL, /* tp_clear */
NULL, /* tp_richcompare */
0, /* tp_weaklistoffset */
NULL, /* tp_iter */
NULL, /* tp_iternext */
NULL, /* tp_methods */
NULL, /* tp_members */
Py%s_getset, /* tp_getset */
NULL, /* tp_base */
NULL, /* tp_dict */
NULL, /* tp_descr_get */
NULL, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)Py%s_init, /* tp_init */
NULL, /* tp_alloc */
Py%s_new, /* tp_new */
};
Py_%s *Py%s_New(void)
{
return (Py_%s *)Py%s_new(&Py%s_Type, NULL, NULL);
}
int Py%s_Check(PyObject *self)
{
return (self->ob_type == &Py%s_Type);
}
"""%tuple(ty.rawname for x in range(15))
return funcs + '\n'.join(l) + "\n" + struct
def py_initfuncs(types):
l = []
l.append('void genwrap__init(PyObject *m)')
l.append('{')
for ty in types:
if isinstance(ty, idl.Enumeration):
for v in ty.values:
l.append(' PyModule_AddIntConstant(m, "%s", %s);' % (v.rawname, v.name))
elif isinstance(ty, idl.Aggregate):
l.append(' if (PyType_Ready(&Py%s_Type) >= 0) {'%ty.rawname)
l.append(' Py_INCREF(&Py%s_Type);'%ty.rawname)
l.append(' PyModule_AddObject(m, "%s", (PyObject *)&Py%s_Type);'%(ty.rawname, ty.rawname))
l.append(' }')
else:
raise NotImplementedError("unknown type %s (%s)" % (ty.typename, type(ty)))
l.append('}')
return '\n'.join(l) + "\n\n"
def tree_frob(types):
ret = types[:]
for ty in [ty for ty in ret if isinstance(ty, idl.Aggregate)]:
ty.fields = filter(lambda f:f.name is not None and f.type.typename is not None, ty.fields)
return ret
if __name__ == '__main__':
if len(sys.argv) < 4:
print >>sys.stderr, "Usage: genwrap.py <idl> <decls> <defns>"
sys.exit(1)
(_,types) = idl.parse(sys.argv[1])
types = tree_frob(types)
decls = sys.argv[2]
f = open(decls, 'w')
f.write("""#ifndef __PYXL_TYPES_H
#define __PYXL_TYPES_H
/*
* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
#define PKG "xen.lowlevel.xl"
#if __GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
#define _hidden __attribute__((visibility("hidden")))
#define _protected __attribute__((visibility("protected")))
#else
#define _hidden
#define _protected
#endif
/* Initialise all types */
_hidden void genwrap__init(PyObject *m);
/* Generic type initialiser */
_hidden int genwrap__obj_init(PyObject *self, PyObject *args, PyObject *kwds);
/* Auto-generated get/set functions for simple data-types */
_hidden int genwrap__string_set(PyObject *v, char **str);
_hidden PyObject *genwrap__string_get(char **str);
_hidden PyObject *genwrap__ull_get(unsigned long long val);
_hidden int genwrap__ull_set(PyObject *v, unsigned long long *val, unsigned long long mask);
_hidden PyObject *genwrap__ll_get(long long val);
_hidden int genwrap__ll_set(PyObject *v, long long *val, long long mask);
_hidden PyObject *genwrap__defbool_get(libxl_defbool *db);
_hidden int genwrap__defbool_set(PyObject *v, libxl_defbool *db);
""" % " ".join(sys.argv))
for ty in [ty for ty in types if isinstance(ty, idl.Aggregate)]:
f.write('/* Internal API for %s wrapper */\n'%ty.typename)
f.write(py_wrapstruct(ty))
f.write(py_decls(ty))
f.write('\n')
f.write('#endif /* __PYXL_TYPES_H */\n')
f.close()
defns = sys.argv[3]
f = open(defns, 'w')
f.write("""/*
* DO NOT EDIT.
*
* This file is autogenerated by
* "%s"
*/
#include <Python.h>
#include <string.h>
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
#include "libxl.h" /* gah */
#include "%s"
""" % tuple((' '.join(sys.argv),) + (os.path.split(decls)[-1:]),))
for ty in types:
if ty.private:
continue
if isinstance(ty, idl.Aggregate):
f.write('/* Attribute get/set functions for %s */\n'%ty.typename)
for a in ty.fields:
if a.type.private:
continue
if ty.marshal_out():
f.write(py_attrib_get(ty,a))
if ty.marshal_in():
f.write(py_attrib_set(ty,a))
f.write(py_object_def(ty))
f.write(py_initfuncs(types))
f.close()
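
To make the generated code more concrete, here is a hedged sketch of the struct wrapper that py_wrapstruct() emits; the libxl type names below are invented for illustration, and the helper simply inlines the same template rather than importing this script.

```python
# Illustration only: FakeType stands in for an idl.Aggregate; the names are made up.
class FakeType(object):
    typename = "libxl_example_info"
    rawname = "example_info"

def wrapstruct(ty):
    # Essentially the template used by py_wrapstruct() above, inlined here.
    return ("typedef struct {\n"
            "    PyObject_HEAD;\n"
            "    %s obj;\n"
            "}Py_%s;\n" % (ty.typename, ty.rawname))

print(wrapstruct(FakeType()))
# typedef struct {
#     PyObject_HEAD;
#     libxl_example_info obj;
# }Py_example_info;
```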
license: gpl-2.0

DougFirErickson/qgisSpaceSyntaxToolkit | esstoolkit/external/networkx/drawing/nx_agraph.py | copies: 40 | size: 13359

"""
***************
Graphviz AGraph
***************
Interface to pygraphviz AGraph class.
Examples
--------
>>> G=nx.complete_graph(5)
>>> A=nx.to_agraph(G)
>>> H=nx.from_agraph(A)
See Also
--------
Pygraphviz: http://networkx.lanl.gov/pygraphviz
"""
# Copyright (C) 2004-2012 by
# Aric Hagberg <[email protected]>
# Dan Schult <[email protected]>
# Pieter Swart <[email protected]>
# All rights reserved.
# BSD license.
import os
import sys
import tempfile
import networkx as nx
__author__ = """Aric Hagberg ([email protected])"""
__all__ = ['from_agraph', 'to_agraph',
'write_dot', 'read_dot',
'graphviz_layout',
'pygraphviz_layout',
'view_pygraphviz']
def from_agraph(A,create_using=None):
"""Return a NetworkX Graph or DiGraph from a PyGraphviz graph.
Parameters
----------
A : PyGraphviz AGraph
A graph created with PyGraphviz
create_using : NetworkX graph class instance
The output is created using the given graph class instance
Examples
--------
>>> K5=nx.complete_graph(5)
>>> A=nx.to_agraph(K5)
>>> G=nx.from_agraph(A)
>>> G=nx.from_agraph(A)
Notes
-----
The Graph G will have a dictionary G.graph_attr containing
the default graphviz attributes for graphs, nodes and edges.
Default node attributes will be in the dictionary G.node_attr
which is keyed by node.
Edge attributes will be returned as edge data in G. With
edge_attr=False the edge data will be the Graphviz edge weight
attribute or the value 1 if no edge weight attribute is found.
"""
if create_using is None:
if A.is_directed():
if A.is_strict():
create_using=nx.DiGraph()
else:
create_using=nx.MultiDiGraph()
else:
if A.is_strict():
create_using=nx.Graph()
else:
create_using=nx.MultiGraph()
# assign defaults
N=nx.empty_graph(0,create_using)
N.name=''
if A.name is not None:
N.name=A.name
# add nodes, attributes to N.node_attr
for n in A.nodes():
str_attr=dict((str(k),v) for k,v in n.attr.items())
N.add_node(str(n),**str_attr)
# add edges, assign edge data as dictionary of attributes
for e in A.edges():
u,v=str(e[0]),str(e[1])
attr=dict(e.attr)
str_attr=dict((str(k),v) for k,v in attr.items())
if not N.is_multigraph():
if e.name is not None:
str_attr['key']=e.name
N.add_edge(u,v,**str_attr)
else:
N.add_edge(u,v,key=e.name,**str_attr)
# add default attributes for graph, nodes, and edges
# hang them on N.graph_attr
N.graph['graph']=dict(A.graph_attr)
N.graph['node']=dict(A.node_attr)
N.graph['edge']=dict(A.edge_attr)
return N
def to_agraph(N):
"""Return a pygraphviz graph from a NetworkX graph N.
Parameters
----------
N : NetworkX graph
A graph created with NetworkX
Examples
--------
>>> K5=nx.complete_graph(5)
>>> A=nx.to_agraph(K5)
Notes
-----
If N has a dict N.graph_attr, an attempt will be made first
to copy properties attached to the graph (see from_agraph)
and then update them with the calling arguments, if any.
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
directed=N.is_directed()
strict=N.number_of_selfloops()==0 and not N.is_multigraph()
A=pygraphviz.AGraph(name=N.name,strict=strict,directed=directed)
# default graph attributes
A.graph_attr.update(N.graph.get('graph',{}))
A.node_attr.update(N.graph.get('node',{}))
A.edge_attr.update(N.graph.get('edge',{}))
# add nodes
for n,nodedata in N.nodes(data=True):
A.add_node(n,**nodedata)
# loop over edges
if N.is_multigraph():
for u,v,key,edgedata in N.edges_iter(data=True,keys=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,key=str(key),**str_edgedata)
else:
for u,v,edgedata in N.edges_iter(data=True):
str_edgedata=dict((k,str(v)) for k,v in edgedata.items())
A.add_edge(u,v,**str_edgedata)
return A
def write_dot(G,path):
"""Write NetworkX graph G to Graphviz dot format on path.
Parameters
----------
G : graph
A networkx graph
path : filename
Filename or file handle to write
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
A=to_agraph(G)
A.write(path)
A.clear()
return
def read_dot(path):
"""Return a NetworkX graph from a dot file on path.
Parameters
----------
path : file or string
File name or file handle to read.
"""
try:
import pygraphviz
except ImportError:
raise ImportError('read_dot() requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
A=pygraphviz.AGraph(file=path)
return from_agraph(A)
def graphviz_layout(G,prog='neato',root=None, args=''):
"""Create node positions for G using Graphviz.
Parameters
----------
G : NetworkX graph
A graph created with NetworkX
prog : string
Name of Graphviz layout program
root : string, optional
Root node for twopi layout
args : string, optional
Extra arguments to Graphviz layout program
Returns : dictionary
Dictionary of (x, y) positions keyed by node.
Examples
--------
>>> G=nx.petersen_graph()
>>> pos=nx.graphviz_layout(G)
>>> pos=nx.graphviz_layout(G,prog='dot')
Notes
-----
This is a wrapper for pygraphviz_layout.
"""
return pygraphviz_layout(G,prog=prog,root=root,args=args)
def pygraphviz_layout(G,prog='neato',root=None, args=''):
"""Create node positions for G using Graphviz.
Parameters
----------
G : NetworkX graph
A graph created with NetworkX
prog : string
Name of Graphviz layout program
root : string, optional
Root node for twopi layout
args : string, optional
Extra arguments to Graphviz layout program
Returns : dictionary
Dictionary of (x, y) positions keyed by node.
Examples
--------
>>> G=nx.petersen_graph()
>>> pos=nx.graphviz_layout(G)
>>> pos=nx.graphviz_layout(G,prog='dot')
"""
try:
import pygraphviz
except ImportError:
raise ImportError('requires pygraphviz ',
'http://networkx.lanl.gov/pygraphviz ',
'(not available for Python3)')
if root is not None:
args+="-Groot=%s"%root
A=to_agraph(G)
A.layout(prog=prog,args=args)
node_pos={}
for n in G:
node=pygraphviz.Node(A,n)
try:
xx,yy=node.attr["pos"].split(',')
node_pos[n]=(float(xx),float(yy))
except:
print("no position for node",n)
node_pos[n]=(0.0,0.0)
return node_pos
@nx.utils.open_file(5, 'w')
def view_pygraphviz(G, edgelabel=None, prog='dot', args='',
suffix='', path=None):
"""Views the graph G using the specified layout algorithm.
Parameters
----------
G : NetworkX graph
The graph to draw.
edgelabel : str, callable, None
If a string, then it specifies the edge attribute to be displayed
on the edge labels. If a callable, then it is called for each
edge and it should return the string to be displayed on the edges.
The function signature of `edgelabel` should be edgelabel(data),
where `data` is the edge attribute dictionary.
prog : string
Name of Graphviz layout program.
args : str
Additional arguments to pass to the Graphviz layout program.
suffix : str
If `path` is None, we save to a temporary file. The value of
`suffix` will appear at the tail end of the temporary filename.
path : str, None
The filename used to save the image. If None, save to a temporary
file. File formats are the same as those from pygraphviz.agraph.draw.
Returns
-------
path : str
The filename of the generated image.
A : PyGraphviz graph
The PyGraphviz graph instance used to generate the image.
Notes
-----
If this function is called in succession too quickly, sometimes the
image is not displayed. So you might consider time.sleep(.5) between
calls if you experience problems.
"""
if not len(G):
raise nx.NetworkXException("An empty graph cannot be drawn.")
import pygraphviz
# If we are providing default values for graphviz, these must be set
# before any nodes or edges are added to the PyGraphviz graph object.
# The reason for this is that default values only affect incoming objects.
# If you change the default values after the objects have been added,
# then they inherit no value and are set only if explicitly set.
# to_agraph() uses these values.
attrs = ['edge', 'node', 'graph']
for attr in attrs:
if attr not in G.graph:
G.graph[attr] = {}
# These are the default values.
edge_attrs = {'fontsize': '10'}
node_attrs = {'style': 'filled',
'fillcolor': '#0000FF40',
'height': '0.75',
'width': '0.75',
'shape': 'circle'}
graph_attrs = {}
def update_attrs(which, attrs):
# Update graph attributes. Return list of those which were added.
added = []
for k,v in attrs.items():
if k not in G.graph[which]:
G.graph[which][k] = v
added.append(k)
def clean_attrs(which, added):
# Remove added attributes
for attr in added:
del G.graph[which][attr]
if not G.graph[which]:
del G.graph[which]
# Update all default values
update_attrs('edge', edge_attrs)
update_attrs('node', node_attrs)
update_attrs('graph', graph_attrs)
# Convert to agraph, so we inherit default values
A = to_agraph(G)
# Remove the default values we added to the original graph.
clean_attrs('edge', edge_attrs)
clean_attrs('node', node_attrs)
clean_attrs('graph', graph_attrs)
# If the user passed in an edgelabel, we update the labels for all edges.
if edgelabel is not None:
if not hasattr(edgelabel, '__call__'):
def func(data):
return ''.join([" ", str(data[edgelabel]), " "])
else:
func = edgelabel
# update all the edge labels
if G.is_multigraph():
for u,v,key,data in G.edges_iter(keys=True, data=True):
# PyGraphviz doesn't convert the key to a string. See #339
edge = A.get_edge(u,v,str(key))
edge.attr['label'] = str(func(data))
else:
for u,v,data in G.edges_iter(data=True):
edge = A.get_edge(u,v)
edge.attr['label'] = str(func(data))
if path is None:
ext = 'png'
if suffix:
suffix = '_%s.%s' % (suffix, ext)
else:
suffix = '.%s' % (ext,)
path = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
else:
# Assume the decorator worked and it is a file-object.
pass
display_pygraphviz(A, path=path, prog=prog, args=args)
return path.name, A
def display_pygraphviz(graph, path, format=None, prog=None, args=''):
"""Internal function to display a graph in OS dependent manner.
Parameters
----------
graph : PyGraphviz graph
A PyGraphviz AGraph instance.
path : file object
An already opened file object that will be closed.
format : str, None
An attempt is made to guess the output format based on the extension
of the filename. If that fails, the value of `format` is used.
prog : string
Name of Graphviz layout program.
args : str
Additional arguments to pass to the Graphviz layout program.
Notes
-----
If this function is called in succession too quickly, sometimes the
image is not displayed. So you might consider time.sleep(.5) between
calls if you experience problems.
"""
if format is None:
filename = path.name
format = os.path.splitext(filename)[1].lower()[1:]
if not format:
# Let the draw() function use its default
format = None
# Save to a file and display in the default viewer.
# We must close the file before viewing it.
graph.draw(path, format, prog, args)
path.close()
nx.utils.default_opener(filename)
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import pygraphviz
except:
raise SkipTest("pygraphviz not available")
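
A short round-trip sketch tying the functions above together; it assumes this 1.x-era networkx, pygraphviz, and the Graphviz binaries are all installed.

```python
# Round-trip sketch: NetworkX graph -> pygraphviz AGraph -> NetworkX, plus a layout.
import networkx as nx

G = nx.path_graph(4)
A = nx.to_agraph(G)                      # NetworkX -> AGraph
H = nx.from_agraph(A)                    # AGraph -> NetworkX (node names become strings)
pos = nx.graphviz_layout(G, prog='dot')  # {node: (x, y)} positions from Graphviz
print(sorted(H.nodes()), pos[0])
```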
license: gpl-3.0

dmonakhov/googletest | scripts/pump.py | copies: 603 | size: 23316

#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""pump v0.2.0 - Pretty Useful for Meta Programming.
A tool for preprocessor meta programming. Useful for generating
repetitive boilerplate code. Especially useful for writing C++
classes, functions, macros, and templates that need to work with
a variable number of arguments.
USAGE:
pump.py SOURCE_FILE
EXAMPLES:
pump.py foo.cc.pump
Converts foo.cc.pump to foo.cc.
GRAMMAR:
CODE ::= ATOMIC_CODE*
ATOMIC_CODE ::= $var ID = EXPRESSION
| $var ID = [[ CODE ]]
| $range ID EXPRESSION..EXPRESSION
| $for ID SEPARATOR [[ CODE ]]
| $($)
| $ID
| $(EXPRESSION)
| $if EXPRESSION [[ CODE ]] ELSE_BRANCH
| [[ CODE ]]
| RAW_CODE
SEPARATOR ::= RAW_CODE | EMPTY
ELSE_BRANCH ::= $else [[ CODE ]]
| $elif EXPRESSION [[ CODE ]] ELSE_BRANCH
| EMPTY
EXPRESSION has Python syntax.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import re
import sys
TOKEN_TABLE = [
(re.compile(r'\$var\s+'), '$var'),
(re.compile(r'\$elif\s+'), '$elif'),
(re.compile(r'\$else\s+'), '$else'),
(re.compile(r'\$for\s+'), '$for'),
(re.compile(r'\$if\s+'), '$if'),
(re.compile(r'\$range\s+'), '$range'),
(re.compile(r'\$[_A-Za-z]\w*'), '$id'),
(re.compile(r'\$\(\$\)'), '$($)'),
(re.compile(r'\$'), '$'),
(re.compile(r'\[\[\n?'), '[['),
(re.compile(r'\]\]\n?'), ']]'),
]
class Cursor:
"""Represents a position (line and column) in a text file."""
def __init__(self, line=-1, column=-1):
self.line = line
self.column = column
def __eq__(self, rhs):
return self.line == rhs.line and self.column == rhs.column
def __ne__(self, rhs):
return not self == rhs
def __lt__(self, rhs):
return self.line < rhs.line or (
self.line == rhs.line and self.column < rhs.column)
def __le__(self, rhs):
return self < rhs or self == rhs
def __gt__(self, rhs):
return rhs < self
def __ge__(self, rhs):
return rhs <= self
def __str__(self):
if self == Eof():
return 'EOF'
else:
return '%s(%s)' % (self.line + 1, self.column)
def __add__(self, offset):
return Cursor(self.line, self.column + offset)
def __sub__(self, offset):
return Cursor(self.line, self.column - offset)
def Clone(self):
"""Returns a copy of self."""
return Cursor(self.line, self.column)
# Special cursor to indicate the end-of-file.
def Eof():
"""Returns the special cursor to denote the end-of-file."""
return Cursor(-1, -1)
class Token:
"""Represents a token in a Pump source file."""
def __init__(self, start=None, end=None, value=None, token_type=None):
if start is None:
self.start = Eof()
else:
self.start = start
if end is None:
self.end = Eof()
else:
self.end = end
self.value = value
self.token_type = token_type
def __str__(self):
return 'Token @%s: \'%s\' type=%s' % (
self.start, self.value, self.token_type)
def Clone(self):
"""Returns a copy of self."""
return Token(self.start.Clone(), self.end.Clone(), self.value,
self.token_type)
def StartsWith(lines, pos, string):
"""Returns True iff the given position in lines starts with 'string'."""
return lines[pos.line][pos.column:].startswith(string)
def FindFirstInLine(line, token_table):
best_match_start = -1
for (regex, token_type) in token_table:
m = regex.search(line)
if m:
# We found regex in lines
if best_match_start < 0 or m.start() < best_match_start:
best_match_start = m.start()
best_match_length = m.end() - m.start()
best_match_token_type = token_type
if best_match_start < 0:
return None
return (best_match_start, best_match_length, best_match_token_type)
def FindFirst(lines, token_table, cursor):
"""Finds the first occurrence of any string in strings in lines."""
start = cursor.Clone()
cur_line_number = cursor.line
for line in lines[start.line:]:
if cur_line_number == start.line:
line = line[start.column:]
m = FindFirstInLine(line, token_table)
if m:
# We found a regex in line.
(start_column, length, token_type) = m
if cur_line_number == start.line:
start_column += start.column
found_start = Cursor(cur_line_number, start_column)
found_end = found_start + length
return MakeToken(lines, found_start, found_end, token_type)
cur_line_number += 1
# We failed to find str in lines
return None
def SubString(lines, start, end):
"""Returns a substring in lines."""
if end == Eof():
end = Cursor(len(lines) - 1, len(lines[-1]))
if start >= end:
return ''
if start.line == end.line:
return lines[start.line][start.column:end.column]
result_lines = ([lines[start.line][start.column:]] +
lines[start.line + 1:end.line] +
[lines[end.line][:end.column]])
return ''.join(result_lines)
def StripMetaComments(str):
"""Strip meta comments from each line in the given string."""
# First, completely remove lines containing nothing but a meta
# comment, including the trailing \n.
str = re.sub(r'^\s*\$\$.*\n', '', str)
# Then, remove meta comments from contentful lines.
return re.sub(r'\s*\$\$.*', '', str)
def MakeToken(lines, start, end, token_type):
"""Creates a new instance of Token."""
return Token(start, end, SubString(lines, start, end), token_type)
def ParseToken(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = regex.search(line)
if m and not m.start():
return MakeToken(lines, pos, pos + m.end(), token_type)
else:
print 'ERROR: %s expected at %s.' % (token_type, pos)
sys.exit(1)
ID_REGEX = re.compile(r'[_A-Za-z]\w*')
EQ_REGEX = re.compile(r'=')
REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)')
OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*')
WHITE_SPACE_REGEX = re.compile(r'\s')
DOT_DOT_REGEX = re.compile(r'\.\.')
def Skip(lines, pos, regex):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m and not m.start():
return pos + m.end()
else:
return pos
def SkipUntil(lines, pos, regex, token_type):
line = lines[pos.line][pos.column:]
m = re.search(regex, line)
if m:
return pos + m.start()
else:
print ('ERROR: %s expected on line %s after column %s.' %
(token_type, pos.line + 1, pos.column))
sys.exit(1)
def ParseExpTokenInParens(lines, pos):
def ParseInParens(pos):
pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX)
pos = Skip(lines, pos, r'\(')
pos = Parse(pos)
pos = Skip(lines, pos, r'\)')
return pos
def Parse(pos):
pos = SkipUntil(lines, pos, r'\(|\)', ')')
if SubString(lines, pos, pos + 1) == '(':
pos = Parse(pos + 1)
pos = Skip(lines, pos, r'\)')
return Parse(pos)
else:
return pos
start = pos.Clone()
pos = ParseInParens(pos)
return MakeToken(lines, start, pos, 'exp')
def RStripNewLineFromToken(token):
if token.value.endswith('\n'):
return Token(token.start, token.end, token.value[:-1], token.token_type)
else:
return token
def TokenizeLines(lines, pos):
while True:
found = FindFirst(lines, TOKEN_TABLE, pos)
if not found:
yield MakeToken(lines, pos, Eof(), 'code')
return
if found.start == pos:
prev_token = None
prev_token_rstripped = None
else:
prev_token = MakeToken(lines, pos, found.start, 'code')
prev_token_rstripped = RStripNewLineFromToken(prev_token)
if found.token_type == '$var':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
eq_token = ParseToken(lines, pos, EQ_REGEX, '=')
yield eq_token
pos = Skip(lines, eq_token.end, r'\s*')
if SubString(lines, pos, pos + 2) != '[[':
exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp')
yield exp_token
pos = Cursor(exp_token.end.line + 1, 0)
elif found.token_type == '$for':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX)
elif found.token_type == '$range':
if prev_token_rstripped:
yield prev_token_rstripped
yield found
id_token = ParseToken(lines, found.end, ID_REGEX, 'id')
yield id_token
pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX)
dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..')
yield MakeToken(lines, pos, dots_pos, 'exp')
yield MakeToken(lines, dots_pos, dots_pos + 2, '..')
pos = dots_pos + 2
new_pos = Cursor(pos.line + 1, 0)
yield MakeToken(lines, pos, new_pos, 'exp')
pos = new_pos
elif found.token_type == '$':
if prev_token:
yield prev_token
yield found
exp_token = ParseExpTokenInParens(lines, found.end)
yield exp_token
pos = exp_token.end
elif (found.token_type == ']]' or found.token_type == '$if' or
found.token_type == '$elif' or found.token_type == '$else'):
if prev_token_rstripped:
yield prev_token_rstripped
yield found
pos = found.end
else:
if prev_token:
yield prev_token
yield found
pos = found.end
def Tokenize(s):
"""A generator that yields the tokens in the given string."""
if s != '':
lines = s.splitlines(True)
for token in TokenizeLines(lines, Cursor(0, 0)):
yield token
class CodeNode:
def __init__(self, atomic_code_list=None):
self.atomic_code = atomic_code_list
class VarNode:
def __init__(self, identifier=None, atomic_code=None):
self.identifier = identifier
self.atomic_code = atomic_code
class RangeNode:
def __init__(self, identifier=None, exp1=None, exp2=None):
self.identifier = identifier
self.exp1 = exp1
self.exp2 = exp2
class ForNode:
def __init__(self, identifier=None, sep=None, code=None):
self.identifier = identifier
self.sep = sep
self.code = code
class ElseNode:
def __init__(self, else_branch=None):
self.else_branch = else_branch
class IfNode:
def __init__(self, exp=None, then_branch=None, else_branch=None):
self.exp = exp
self.then_branch = then_branch
self.else_branch = else_branch
class RawCodeNode:
def __init__(self, token=None):
self.raw_code = token
class LiteralDollarNode:
def __init__(self, token):
self.token = token
class ExpNode:
def __init__(self, token, python_exp):
self.token = token
self.python_exp = python_exp
def PopFront(a_list):
head = a_list[0]
a_list[:1] = []
return head
def PushFront(a_list, elem):
a_list[:0] = [elem]
def PopToken(a_list, token_type=None):
token = PopFront(a_list)
if token_type is not None and token.token_type != token_type:
print 'ERROR: %s expected at %s' % (token_type, token.start)
print 'ERROR: %s found instead' % (token,)
sys.exit(1)
return token
def PeekToken(a_list):
if not a_list:
return None
return a_list[0]
def ParseExpNode(token):
python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value)
return ExpNode(token, python_exp)
def ParseElseNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
next = PeekToken(tokens)
if not next:
return None
if next.token_type == '$else':
Pop('$else')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
elif next.token_type == '$elif':
Pop('$elif')
exp = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
inner_else_node = ParseElseNode(tokens)
return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)])
elif not next.value.strip():
Pop('code')
return ParseElseNode(tokens)
else:
return None
def ParseAtomicCodeNode(tokens):
def Pop(token_type=None):
return PopToken(tokens, token_type)
head = PopFront(tokens)
t = head.token_type
if t == 'code':
return RawCodeNode(head)
elif t == '$var':
id_token = Pop('id')
Pop('=')
next = PeekToken(tokens)
if next.token_type == 'exp':
exp_token = Pop()
return VarNode(id_token, ParseExpNode(exp_token))
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return VarNode(id_token, code_node)
elif t == '$for':
id_token = Pop('id')
next_token = PeekToken(tokens)
if next_token.token_type == 'code':
sep_token = next_token
Pop('code')
else:
sep_token = None
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
return ForNode(id_token, sep_token, code_node)
elif t == '$if':
exp_token = Pop('code')
Pop('[[')
code_node = ParseCodeNode(tokens)
Pop(']]')
else_node = ParseElseNode(tokens)
return IfNode(ParseExpNode(exp_token), code_node, else_node)
elif t == '$range':
id_token = Pop('id')
exp1_token = Pop('exp')
Pop('..')
exp2_token = Pop('exp')
return RangeNode(id_token, ParseExpNode(exp1_token),
ParseExpNode(exp2_token))
elif t == '$id':
return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id'))
elif t == '$($)':
return LiteralDollarNode(head)
elif t == '$':
exp_token = Pop('exp')
return ParseExpNode(exp_token)
elif t == '[[':
code_node = ParseCodeNode(tokens)
Pop(']]')
return code_node
else:
PushFront(tokens, head)
return None
def ParseCodeNode(tokens):
atomic_code_list = []
while True:
if not tokens:
break
atomic_code_node = ParseAtomicCodeNode(tokens)
if atomic_code_node:
atomic_code_list.append(atomic_code_node)
else:
break
return CodeNode(atomic_code_list)
def ParseToAST(pump_src_text):
"""Convert the given Pump source text into an AST."""
tokens = list(Tokenize(pump_src_text))
code_node = ParseCodeNode(tokens)
return code_node
class Env:
def __init__(self):
self.variables = []
self.ranges = []
def Clone(self):
clone = Env()
clone.variables = self.variables[:]
clone.ranges = self.ranges[:]
return clone
def PushVariable(self, var, value):
# If value looks like an int, store it as an int.
try:
int_value = int(value)
if ('%s' % int_value) == value:
value = int_value
except Exception:
pass
self.variables[:0] = [(var, value)]
def PopVariable(self):
self.variables[:1] = []
def PushRange(self, var, lower, upper):
self.ranges[:0] = [(var, lower, upper)]
def PopRange(self):
self.ranges[:1] = []
def GetValue(self, identifier):
for (var, value) in self.variables:
if identifier == var:
return value
print 'ERROR: meta variable %s is undefined.' % (identifier,)
sys.exit(1)
def EvalExp(self, exp):
try:
result = eval(exp.python_exp)
except Exception, e:
print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e)
print ('ERROR: failed to evaluate meta expression %s at %s' %
(exp.python_exp, exp.token.start))
sys.exit(1)
return result
def GetRange(self, identifier):
for (var, lower, upper) in self.ranges:
if identifier == var:
return (lower, upper)
print 'ERROR: range %s is undefined.' % (identifier,)
sys.exit(1)
class Output:
def __init__(self):
self.string = ''
def GetLastLine(self):
index = self.string.rfind('\n')
if index < 0:
return ''
return self.string[index + 1:]
def Append(self, s):
self.string += s
def RunAtomicCode(env, node, output):
if isinstance(node, VarNode):
identifier = node.identifier.value.strip()
result = Output()
RunAtomicCode(env.Clone(), node.atomic_code, result)
value = result.string
env.PushVariable(identifier, value)
elif isinstance(node, RangeNode):
identifier = node.identifier.value.strip()
lower = int(env.EvalExp(node.exp1))
upper = int(env.EvalExp(node.exp2))
env.PushRange(identifier, lower, upper)
elif isinstance(node, ForNode):
identifier = node.identifier.value.strip()
if node.sep is None:
sep = ''
else:
sep = node.sep.value
(lower, upper) = env.GetRange(identifier)
for i in range(lower, upper + 1):
new_env = env.Clone()
new_env.PushVariable(identifier, i)
RunCode(new_env, node.code, output)
if i != upper:
output.Append(sep)
elif isinstance(node, RawCodeNode):
output.Append(node.raw_code.value)
elif isinstance(node, IfNode):
cond = env.EvalExp(node.exp)
if cond:
RunCode(env.Clone(), node.then_branch, output)
elif node.else_branch is not None:
RunCode(env.Clone(), node.else_branch, output)
elif isinstance(node, ExpNode):
value = env.EvalExp(node)
output.Append('%s' % (value,))
elif isinstance(node, LiteralDollarNode):
output.Append('$')
elif isinstance(node, CodeNode):
RunCode(env.Clone(), node, output)
else:
print 'BAD'
print node
sys.exit(1)
def RunCode(env, code_node, output):
for atomic_code in code_node.atomic_code:
RunAtomicCode(env, atomic_code, output)
def IsComment(cur_line):
return '//' in cur_line
def IsInPreprocessorDirevative(prev_lines, cur_line):
if cur_line.lstrip().startswith('#'):
return True
return prev_lines != [] and prev_lines[-1].endswith('\\')
def WrapComment(line, output):
loc = line.find('//')
before_comment = line[:loc].rstrip()
if before_comment == '':
indent = loc
else:
output.append(before_comment)
indent = len(before_comment) - len(before_comment.lstrip())
prefix = indent*' ' + '// '
max_len = 80 - len(prefix)
comment = line[loc + 2:].strip()
segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != '']
cur_line = ''
for seg in segs:
if len((cur_line + seg).rstrip()) < max_len:
cur_line += seg
else:
if cur_line.strip() != '':
output.append(prefix + cur_line.rstrip())
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapCode(line, line_concat, output):
indent = len(line) - len(line.lstrip())
prefix = indent*' ' # Prefix of the current line
max_len = 80 - indent - len(line_concat) # Maximum length of the current line
new_prefix = prefix + 4*' ' # Prefix of a continuation line
new_max_len = max_len - 4 # Maximum length of a continuation line
# Prefers to wrap a line after a ',' or ';'.
segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != '']
cur_line = '' # The current line without leading spaces.
for seg in segs:
# If the line is still too long, wrap at a space.
while cur_line == '' and len(seg.strip()) > max_len:
seg = seg.lstrip()
split_at = seg.rfind(' ', 0, max_len)
output.append(prefix + seg[:split_at].strip() + line_concat)
seg = seg[split_at + 1:]
prefix = new_prefix
max_len = new_max_len
if len((cur_line + seg).rstrip()) < max_len:
cur_line = (cur_line + seg).lstrip()
else:
output.append(prefix + cur_line.rstrip() + line_concat)
prefix = new_prefix
max_len = new_max_len
cur_line = seg.lstrip()
if cur_line.strip() != '':
output.append(prefix + cur_line.strip())
def WrapPreprocessorDirevative(line, output):
WrapCode(line, ' \\', output)
def WrapPlainCode(line, output):
WrapCode(line, '', output)
def IsHeaderGuardOrInclude(line):
return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or
re.match(r'^#include\s', line))
def WrapLongLine(line, output):
line = line.rstrip()
if len(line) <= 80:
output.append(line)
elif IsComment(line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapComment(line, output)
elif IsInPreprocessorDirevative(output, line):
if IsHeaderGuardOrInclude(line):
# The style guide made an exception to allow long header guard lines
# and includes.
output.append(line)
else:
WrapPreprocessorDirevative(line, output)
else:
WrapPlainCode(line, output)
def BeautifyCode(string):
lines = string.splitlines()
output = []
for line in lines:
WrapLongLine(line, output)
output2 = [line.rstrip() for line in output]
return '\n'.join(output2) + '\n'
def ConvertFromPumpSource(src_text):
"""Return the text generated from the given Pump source text."""
ast = ParseToAST(StripMetaComments(src_text))
output = Output()
RunCode(Env(), ast, output)
return BeautifyCode(output.string)
def main(argv):
if len(argv) == 1:
print __doc__
sys.exit(1)
file_path = argv[-1]
output_str = ConvertFromPumpSource(file(file_path, 'r').read())
if file_path.endswith('.pump'):
output_file_path = file_path[:-5]
else:
output_file_path = '-'
if output_file_path == '-':
print output_str,
else:
output_file = file(output_file_path, 'w')
output_file.write('// This file was GENERATED by command:\n')
output_file.write('// %s %s\n' %
(os.path.basename(__file__), os.path.basename(file_path)))
output_file.write('// DO NOT EDIT BY HAND!!!\n\n')
output_file.write(output_str)
output_file.close()
if __name__ == '__main__':
main(sys.argv)
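
To illustrate the grammar above, a hedged example of feeding a tiny Pump snippet through ConvertFromPumpSource(); it assumes the script is importable as `pump` and, like the script itself, runs under Python 2.

```python
# Illustration only (Python 2, matching the script above).
from pump import ConvertFromPumpSource

src = (
    '$range i 1..3\n'
    '$for i [[void Foo$i();\n]]\n'
)
print(ConvertFromPumpSource(src))
# Expected to expand roughly to:
#   void Foo1();
#   void Foo2();
#   void Foo3();
```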
license: bsd-3-clause

maverickhuenlam/blog | node_modules/grunt-docker/node_modules/docker/node_modules/pygmentize-bundled/vendor/pygments/build-2.7/pygments/styles/emacs.py | copies: 364 | size: 2486

# -*- coding: utf-8 -*-
"""
pygments.styles.emacs
~~~~~~~~~~~~~~~~~~~~~
A highlighting style for Pygments, inspired by Emacs.
:copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class EmacsStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #008800",
Comment.Preproc: "noitalic",
Comment.Special: "noitalic bold",
Keyword: "bold #AA22FF",
Keyword.Pseudo: "nobold",
Keyword.Type: "bold #00BB00",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#AA22FF",
Name.Function: "#00A000",
Name.Class: "#0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#B8860B",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#BB4444",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BB4444",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
String.Symbol: "#B8860B",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
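
A hedged usage sketch: selecting this style by name through the standard Pygments API (any Pygments install that ships the emacs style should behave the same way).

```python
# Render a small snippet as HTML using the "emacs" style defined above.
from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import HtmlFormatter

html = highlight("print('hello')", PythonLexer(),
                 HtmlFormatter(style='emacs', full=True))
print(html[:60])
```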
license: mit

total-impact/total-impact-core | test/unit_tests/providers/test_webpage.py | copies: 4 | size: 3437

# -*- coding: utf-8 -*- # need this line because test utf-8 strings later
import os
import collections
from test.unit_tests.providers import common
from test.unit_tests.providers.common import ProviderTestCase
from totalimpact.providers.provider import Provider, ProviderContentMalformedError
from test.utils import slow, http
from nose.tools import assert_equals, raises
datadir = os.path.join(os.path.split(__file__)[0], "../../../extras/sample_provider_pages/webpage")
SAMPLE_EXTRACT_BIBLIO_PAGE = os.path.join(datadir, "biblio")
class TestWebpage(ProviderTestCase):
provider_name = "webpage"
testitem_aliases = ("url", "http://nescent.org")
testitem_biblio = ("url", "http://nescent.org")
testitem_members = "http://nescent.org\nhttp://blenz.ca\nhttps://heroku.com"
def setUp(self):
ProviderTestCase.setUp(self)
def test_is_relevant_alias(self):
# ensure that it matches appropriate ids
assert_equals(self.provider.is_relevant_alias(self.testitem_aliases), True)
assert_equals(self.provider.is_relevant_alias(("doi", "NOT A GITHUB ID")), False)
def test_extract_biblio(self):
f = open(SAMPLE_EXTRACT_BIBLIO_PAGE, "r")
ret = self.provider._extract_biblio(f.read())
expected = {'h1': u'WELCOME', 'title': u'NESCent: The National Evolutionary Synthesis Center'}
assert_equals(ret, expected)
def test_extract_biblio_russian(self):
#from http://www.youtube.com/watch?v=9xBmU0TPZC4
page = """<html><head><title>День города. Донецк 2010 - YouTube</title></head>
<body>
<h1 id="watch-headline-title">
День города. Донецк 2010
</h1>
</body></html>"""
ret = self.provider._extract_biblio(page)
expected = {'h1': u'День города. Донецк 2010', 'title': u"День города. Донецк 2010 - YouTube"}
assert_equals(ret, expected)
# override common because this provider does not raise errors, unlike most other providers
def test_provider_biblio_400(self):
Provider.http_get = common.get_400
biblio = self.provider.biblio([self.testitem_biblio])
assert_equals(biblio, {})
# override common because this provider does not raise errors, unlike most other providers
def test_provider_biblio_500(self):
Provider.http_get = common.get_500
biblio = self.provider.biblio([self.testitem_biblio])
assert_equals(biblio, {})
def test_member_items(self):
ret = self.provider.member_items(self.testitem_members)
expected = [('url', 'http://nescent.org'), ('url', 'http://blenz.ca'), ('url', 'https://heroku.com')]
assert_equals(ret, expected)
def test_provider_member_items_400(self):
pass
def test_provider_member_items_500(self):
pass
def test_provider_member_items_empty(self):
pass
def test_provider_member_items_nonsense_txt(self):
pass
def test_provider_member_items_nonsense_xml(self):
pass
@http
def test_biblio(self):
ret = self.provider.biblio([("url", "http://www.digitalhumanities.org/dhq/vol/2/1/000019/000019.html")])
expected = {'title': u'DHQ: Digital Humanities Quarterly: As You Can See: Applying Visual Collaborative Filtering to Works of Art'}
assert_equals(ret, expected)
license: mit

Jollytown/Garuda | server/garuda/lib/python2.7/site-packages/django/contrib/gis/shortcuts.py | copies: 197 | size: 1129

import zipfile
from io import BytesIO
from django.conf import settings
from django.http import HttpResponse
from django.template import loader
def compress_kml(kml):
"Returns compressed KMZ from the given KML string."
kmz = BytesIO()
zf = zipfile.ZipFile(kmz, 'a', zipfile.ZIP_DEFLATED)
zf.writestr('doc.kml', kml.encode(settings.DEFAULT_CHARSET))
zf.close()
kmz.seek(0)
return kmz.read()
def render_to_kml(*args, **kwargs):
"Renders the response as KML (using the correct MIME type)."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='application/vnd.google-earth.kml+xml')
def render_to_kmz(*args, **kwargs):
"""
Compresses the KML content and returns as KMZ (using the correct
MIME type).
"""
return HttpResponse(compress_kml(loader.render_to_string(*args, **kwargs)),
content_type='application/vnd.google-earth.kmz')
def render_to_text(*args, **kwargs):
"Renders the response using the MIME type for plain text."
return HttpResponse(loader.render_to_string(*args, **kwargs),
content_type='text/plain')
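
A hedged example of wiring render_to_kmz() into a view inside a configured Django project; the template name and context below are invented for illustration.

```python
# Hypothetical view using the helpers above; "placemarks.kml" is an assumed template.
from django.contrib.gis.shortcuts import render_to_kmz

def placemarks_kmz(request):
    context = {'places': []}  # whatever the template actually expects
    return render_to_kmz('placemarks.kml', context)
```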
license: mit

buaabyl/lime_dict | ngram-chs/test_model.py | copies: 1 | size: 2375

#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#
# GNU GENERAL PUBLIC LICENSE
# Version 2, June 1991
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
#
# @ref https://stanfordnlp.github.io/CoreNLP/
#
# TODO: choose more effective database, sqlite3 is not suitable,
# leveldb is too complex...
#
# @author william bao
# 0.0.1 unigram and bigram model for testing
#
#
import sys
import os
import re
import getopt
import glob
import json
import math
import sqlite3
import time
def file_put_json(dbname, d):
jstr = json.dumps(d, indent=4)
f = open(dbname, 'w', encoding='utf-8')
f.write(jstr)
f.close()
def file_get_json(dbname):
f = open(dbname, 'r', encoding='utf-8')
jstr = f.read()
f.close()
db = json.loads(jstr)
return db
if __name__ == '__main__':
dbname = 'ngram_chs.db'
db = sqlite3.connect(dbname)
cur = db.cursor()
piyins = 'zhe shi yi ge jian dan de ce shi yong li'
s = '这 是 一个 简单的 测试 用例'
tokens = s.split(' ')
i = 0
n = len(tokens)
l = []
while i < n:
token = tokens[i]
l.append(token)
print('INPUT:', ' '.join(l), '[ ]')
t1 = time.time()
res = cur.execute('SELECT freq FROM unigram')
t2 = time.time()
if res:
freq = res.fetchone()[0]
res = cur.execute('SELECT phrase1, logp FROM bigram WHERE phrase0 = ? ORDER BY logp ASC LIMIT 20', [token])
t3 = time.time()
if res:
for row in res.fetchall():
print('', row)
print('* unigram', t2-t1, 'seconds')
print('* bigram', t3-t2, 'seconds')
i = i + 1
print()
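
For reference, a hedged sketch of the schema that the queries above imply; the column names are inferred from the SELECT statements, so the real ngram_chs.db may differ.

```python
# Inferred schema, illustration only: unigram(phrase, freq), bigram(phrase0, phrase1, logp).
import sqlite3

db = sqlite3.connect(':memory:')
cur = db.cursor()
cur.execute('CREATE TABLE unigram (phrase TEXT, freq INTEGER)')
cur.execute('CREATE TABLE bigram (phrase0 TEXT, phrase1 TEXT, logp REAL)')
cur.executemany('INSERT INTO bigram VALUES (?, ?, ?)',
                [('这', '是', -1.2), ('这', '时', -3.4)])
for row in cur.execute('SELECT phrase1, logp FROM bigram '
                       'WHERE phrase0 = ? ORDER BY logp ASC LIMIT 20', ['这']):
    print(row)
```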
license: gpl-2.0

ostroproject/meta-iotqa | lib/oeqa/runtime/nodejs/bleno.py | copies: 1 | size: 3666

#!/usr/bin/env python3
import os
import sys
import time
import json
import shutil
import subprocess
from oeqa.oetest import oeRuntimeTest
from oeqa.utils.decorators import tag
class BlenoTest(oeRuntimeTest):
cleanup = False
bleno_prefix_dir = '/home/root'
def setUp(self):
'''
Install bleno on the target device.
'''
self.clean_up_dirs()
print('\nInstalling bleno on the target...')
install_bleno_cmd = 'cd {prefix};npm install bleno'.format(
prefix = self.bleno_prefix_dir)
(status, output) = self.target.run(install_bleno_cmd)
if status != 0:
sys.stderr.write('Failed to install bleno on the target device.')
return
print('Installing bleno on the target: done.')
print('Installing bleno devDependencies for test...')
npm_install_cmd = 'cd {prefix}/node_modules/bleno;npm install'.format(
prefix = self.bleno_prefix_dir)
(status, output) = self.target.run(npm_install_cmd)
if status != 0:
sys.stderr.write('Failed to install bleno devDependencies for test.')
return
print('Installing bleno devDependencies for test: done.')
update_mocha_test_cmd = 'cd {prefix}/node_modules/bleno;'.format(
prefix = self.bleno_prefix_dir)
update_mocha_test_cmd += 'sed -i -e "s|-R spec test/\*.js|'
update_mocha_test_cmd += '-R json test/\*.js > ../bleno.log|" package.json'
print(update_mocha_test_cmd)
self.target.run(update_mocha_test_cmd)
@tag(CasesNumber = 23)
def test_bleno(self):
'''
Run the bleno test cases on the target device.
'''
test_cmd = 'cd {prefix}/node_modules/bleno;npm test'.format(
prefix = self.bleno_prefix_dir)
self.target.run(test_cmd)
cat_bleno_log_cmd = 'cat {prefix}/node_modules/bleno.log'.format(
prefix = self.bleno_prefix_dir)
(status, output) = self.target.run(cat_bleno_log_cmd)
self.parse_bleno_test_log(output)
def parse_bleno_test_log(self, output):
'''
Parse the json-formatted test results log.
'''
try:
result_json = json.loads(output.strip())
except Exception:
sys.stderr.write('Invalid JSON format results.')
return
with open('result-bleno.log', 'w') as fp:
for passed_tc in result_json.get('passes'):
fp.write('{t} - runtest.py - RESULTS - ' \
'Testcase {tc_name}: {result}\n'.format(
t = time.strftime('%H:%M:%S', time.localtime()),
tc_name = '{tc}'.format(tc = passed_tc.get('fullTitle').replace(' ', '_')),
result = 'PASSED'))
for failed_tc in result_json.get('failures'):
fp.write('{t} - runtest.py - RESULTS - ' \
'Testcase {tc_name}: {result}\n'.format(
t = time.strftime('%H:%M:%S', time.localtime()),
tc_name = '{tc}'.format(tc = failed_tc.get('fullTitle').replace(' ', '_')),
result = 'FAILED'))
def clean_up_dirs(self):
'''
Remove any bleno directory if it already exists on the target device.
'''
if self.cleanup:
self.target.run('rm -fr ~/node_modules/bleno')
def tearDown(self):
'''
Clean up work.
'''
self.clean_up_dirs();
license: mit

snailbob/namebench | nb_third_party/dns/edns.py | copies: 248 | size: 4312

# Copyright (C) 2009 Nominum, Inc.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose with or without fee is hereby granted,
# provided that the above copyright notice and this permission notice
# appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""EDNS Options"""
NSID = 3
class Option(object):
"""Base class for all EDNS option types.
"""
def __init__(self, otype):
"""Initialize an option.
@param otype: The option type
@type otype: int
"""
self.otype = otype
def to_wire(self, file):
"""Convert an option to wire format.
"""
raise NotImplementedError
def from_wire(cls, otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
@param current: The offset in wire of the beginning of the rdata.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
@rtype: dns.edns.Option instance"""
raise NotImplementedError
from_wire = classmethod(from_wire)
def _cmp(self, other):
"""Compare an ENDS option with another option of the same type.
Return < 0 if self < other, 0 if self == other, and > 0 if self > other.
"""
raise NotImplementedError
def __eq__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) == 0
def __ne__(self, other):
if not isinstance(other, Option):
return False
if self.otype != other.otype:
return False
return self._cmp(other) != 0
def __lt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) < 0
def __le__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) <= 0
def __ge__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) >= 0
def __gt__(self, other):
if not isinstance(other, Option) or \
self.otype != other.otype:
return NotImplemented
return self._cmp(other) > 0
class GenericOption(Option):
"""Generate Rdata Class
This class is used for EDNS option types for which we have no better
implementation.
"""
def __init__(self, otype, data):
super(GenericOption, self).__init__(otype)
self.data = data
def to_wire(self, file):
file.write(self.data)
def from_wire(cls, otype, wire, current, olen):
return cls(otype, wire[current : current + olen])
from_wire = classmethod(from_wire)
def _cmp(self, other):
return cmp(self.data, other.data)
_type_to_class = {
}
def get_option_class(otype):
cls = _type_to_class.get(otype)
if cls is None:
cls = GenericOption
return cls
def option_from_wire(otype, wire, current, olen):
"""Build an EDNS option object from wire format
@param otype: The option type
@type otype: int
@param wire: The wire-format message
@type wire: string
    @param current: The offset in wire of the beginning of the option data.
@type current: int
@param olen: The length of the wire-format option data
@type olen: int
    @rtype: dns.edns.Option instance"""
cls = get_option_class(otype)
return cls.from_wire(otype, wire, current, olen)
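# Illustrative sketch (not part of the original module): round-tripping an
# unrecognised option through GenericOption. The option code 65001 and the
# payload below are made-up values; StringIO stands in for the wire buffer.
def _example_option_roundtrip():
    from StringIO import StringIO
    opt = GenericOption(65001, 'example-data')
    buf = StringIO()
    opt.to_wire(buf)
    parsed = option_from_wire(65001, buf.getvalue(), 0, len('example-data'))
    return opt == parsed  # True: same otype and identical data compare equal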
|
apache-2.0
|
jiachenning/odoo
|
addons/account_followup/__openerp__.py
|
261
|
2938
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Payment Follow-up Management',
'version': '1.0',
'category': 'Accounting & Finance',
'description': """
Module to automate letters for unpaid invoices, with multi-level recalls.
=========================================================================
You can define your multiple levels of recall through the menu:
---------------------------------------------------------------
Configuration / Follow-up / Follow-up Levels
Once it is defined, you can automatically print recalls every day by simply clicking on the menu:
------------------------------------------------------------------------------------------------------
Payment Follow-Up / Send Email and letters
It will generate a PDF / send emails / set manual actions according to the different levels
of recall defined. You can define different policies for different companies.
Note that if you want to check the follow-up level for a given partner/account entry, you can do so from the menu:
------------------------------------------------------------------------------------------------------------------
Reporting / Accounting / Follow-ups Analysis
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/billing',
'depends': ['account_accountant', 'mail'],
'data': [
'security/account_followup_security.xml',
'security/ir.model.access.csv',
'report/account_followup_report.xml',
'account_followup_data.xml',
'account_followup_view.xml',
'account_followup_customers.xml',
'wizard/account_followup_print_view.xml',
'res_config_view.xml',
'views/report_followup.xml',
'account_followup_reports.xml'
],
'demo': ['account_followup_demo.xml'],
'test': [
'test/account_followup.yml',
],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
kongseokhwan/kulcloud-iitp-neutron
|
neutron/tests/functional/api/test_policies.py
|
6
|
3938
|
# Copyright (c) 2014 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import os.path
from neutron import context
from neutron import policy
from neutron.api import extensions
from neutron.api.v2 import attributes
from neutron.tests import base
TEST_PATH = os.path.dirname(os.path.abspath(__file__))
class APIPolicyTestCase(base.BaseTestCase):
"""
Tests for REST API policy checks. Ideally this would be done against an
    environment with an instantiated plugin, but there appear to be problems
    with instantiating a plugin against an sqlite environment and, as yet, there
is no precedent for running a functional test against an actual database
backend.
"""
api_version = "2.0"
def setUp(self):
super(APIPolicyTestCase, self).setUp()
self.ATTRIBUTE_MAP_COPY = copy.copy(attributes.RESOURCE_ATTRIBUTE_MAP)
self.extension_path = os.path.abspath(os.path.join(
TEST_PATH, "../../../extensions"))
policy.reset()
def _network_definition(self):
return {'name': 'test_network',
'ports': [],
'subnets': [],
'status': 'up',
'admin_state_up': True,
'shared': False,
'tenant_id': 'admin',
'id': 'test_network',
'router:external': True}
def _check_external_router_policy(self, context):
return policy.check(context, 'get_network', self._network_definition())
def test_premature_loading(self):
"""
Verifies that loading policies by way of admin context before
populating extensions and extending the resource map results in
        networks with router:external set to True being invisible to regular
tenants.
"""
extension_manager = extensions.ExtensionManager(self.extension_path)
admin_context = context.get_admin_context()
tenant_context = context.Context('test_user', 'test_tenant_id', False)
extension_manager.extend_resources(self.api_version,
attributes.RESOURCE_ATTRIBUTE_MAP)
self.assertEqual(self._check_external_router_policy(admin_context),
True)
self.assertEqual(self._check_external_router_policy(tenant_context),
False)
def test_proper_load_order(self):
"""
Verifies that loading policies by way of admin context after
populating extensions and extending the resource map results in
        networks with router:external set to True being visible to regular tenants.
"""
extension_manager = extensions.ExtensionManager(self.extension_path)
extension_manager.extend_resources(self.api_version,
attributes.RESOURCE_ATTRIBUTE_MAP)
admin_context = context.get_admin_context()
tenant_context = context.Context('test_user', 'test_tenant_id', False)
self.assertEqual(self._check_external_router_policy(admin_context),
True)
self.assertEqual(self._check_external_router_policy(tenant_context),
True)
def tearDown(self):
if self.ATTRIBUTE_MAP_COPY:
attributes.RESOURCE_ATTRIBUTE_MAP = self.ATTRIBUTE_MAP_COPY
super(APIPolicyTestCase, self).tearDown()
|
apache-2.0
|
mspark93/VTK
|
ThirdParty/AutobahnPython/autobahn/wamp/role.py
|
17
|
6212
|
###############################################################################
##
## Copyright (C) 2014 Tavendo GmbH
##
## Licensed under the Apache License, Version 2.0 (the "License");
## you may not use this file except in compliance with the License.
## You may obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
###############################################################################
from __future__ import absolute_import
__all__ = ['RoleFeatures',
'RoleBrokerFeatures',
'RoleSubscriberFeatures',
'RolePublisherFeatures',
'RoleDealerFeatures',
'RoleCallerFeatures',
'RoleCalleeFeatures',
'ROLE_NAME_TO_CLASS']
import json, types
from autobahn import util
from autobahn.wamp.exception import ProtocolError
class RoleFeatures(util.EqualityMixin):
ROLE = None
def __str__(self):
return json.dumps(self.__dict__)
def _check_all_bool(self):
## check feature attributes
for k in self.__dict__:
if not k.startswith('_') and k != 'ROLE':
if getattr(self, k) is not None and type(getattr(self, k)) != bool:
raise ProtocolError("invalid type {} for feature '{}' for role '{}'".format(getattr(self, k), k, self.ROLE))
class RoleCommonPubSubFeatures(RoleFeatures):
def __init__(self,
publisher_identification = None,
partitioned_pubsub = None):
self.publisher_identification = publisher_identification
self.partitioned_pubsub = partitioned_pubsub
class RoleBrokerFeatures(RoleCommonPubSubFeatures):
ROLE = u'broker'
def __init__(self,
subscriber_blackwhite_listing = None,
publisher_exclusion = None,
publication_trustlevels = None,
pattern_based_subscription = None,
subscriber_metaevents = None,
subscriber_list = None,
event_history = None,
**kwargs):
self.subscriber_blackwhite_listing = subscriber_blackwhite_listing
self.publisher_exclusion = publisher_exclusion
self.publication_trustlevels = publication_trustlevels
self.pattern_based_subscription = pattern_based_subscription
self.subscriber_metaevents = subscriber_metaevents
self.subscriber_list = subscriber_list
self.event_history = event_history
RoleCommonPubSubFeatures.__init__(self, **kwargs)
self._check_all_bool()
class RoleSubscriberFeatures(RoleCommonPubSubFeatures):
ROLE = u'subscriber'
def __init__(self,
publication_trustlevels = None,
pattern_based_subscription = None,
subscriber_metaevents = None,
subscriber_list = None,
event_history = None,
**kwargs):
self.publication_trustlevels = publication_trustlevels
self.pattern_based_subscription = pattern_based_subscription
self.subscriber_metaevents = subscriber_metaevents
self.subscriber_list = subscriber_list
self.event_history = event_history
RoleCommonPubSubFeatures.__init__(self, **kwargs)
self._check_all_bool()
class RolePublisherFeatures(RoleCommonPubSubFeatures):
ROLE = u'publisher'
def __init__(self,
subscriber_blackwhite_listing = None,
publisher_exclusion = None,
**kwargs):
self.subscriber_blackwhite_listing = subscriber_blackwhite_listing
self.publisher_exclusion = publisher_exclusion
RoleCommonPubSubFeatures.__init__(self, **kwargs)
self._check_all_bool()
class RoleCommonRpcFeatures(RoleFeatures):
def __init__(self,
caller_identification = None,
partitioned_rpc = None,
call_timeout = None,
call_canceling = None,
progressive_call_results = None):
self.caller_identification = caller_identification
self.partitioned_rpc = partitioned_rpc
self.call_timeout = call_timeout
self.call_canceling = call_canceling
self.progressive_call_results = progressive_call_results
class RoleDealerFeatures(RoleCommonRpcFeatures):
ROLE = u'dealer'
def __init__(self,
callee_blackwhite_listing = None,
caller_exclusion = None,
call_trustlevels = None,
pattern_based_registration = None,
**kwargs):
self.callee_blackwhite_listing = callee_blackwhite_listing
self.caller_exclusion = caller_exclusion
self.call_trustlevels = call_trustlevels
self.pattern_based_registration = pattern_based_registration
RoleCommonRpcFeatures.__init__(self, **kwargs)
self._check_all_bool()
class RoleCallerFeatures(RoleCommonRpcFeatures):
ROLE = u'caller'
def __init__(self,
callee_blackwhite_listing = None,
caller_exclusion = None,
**kwargs):
self.callee_blackwhite_listing = callee_blackwhite_listing
self.caller_exclusion = caller_exclusion
RoleCommonRpcFeatures.__init__(self, **kwargs)
self._check_all_bool()
class RoleCalleeFeatures(RoleCommonRpcFeatures):
ROLE = u'callee'
def __init__(self,
call_trustlevels = None,
pattern_based_registration = None,
**kwargs):
self.call_trustlevels = call_trustlevels
self.pattern_based_registration = pattern_based_registration
RoleCommonRpcFeatures.__init__(self, **kwargs)
self._check_all_bool()
ROLE_NAME_TO_CLASS = {
u'broker': RoleBrokerFeatures,
u'subscriber': RoleSubscriberFeatures,
u'publisher': RolePublisherFeatures,
u'dealer': RoleDealerFeatures,
u'caller': RoleCallerFeatures,
u'callee': RoleCalleeFeatures,
}
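# Illustrative sketch (not part of the original module): mapping the "roles"
# dictionary of a WAMP HELLO/WELCOME message onto feature objects. The shape of
# roles_msg ({'broker': {'features': {...}}, ...}) is an assumption here, and
# this helper is not part of the Autobahn API.
def _example_parse_roles(roles_msg):
   features = {}
   for name, detail in roles_msg.items():
      cls = ROLE_NAME_TO_CLASS.get(name)
      if cls is None:
         raise ProtocolError("unknown role '{}'".format(name))
      ## known feature flags are validated as booleans by _check_all_bool()
      ## inside each constructor; unknown keys raise TypeError
      features[name] = cls(**detail.get('features', {}))
   return features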
|
bsd-3-clause
|
mboersma/docker-registry
|
scripts/create_ancestry.py
|
38
|
4259
|
#!/usr/bin/env python
from __future__ import print_function
import hashlib
import sys
import simplejson as json
from docker_registry.core import exceptions
import docker_registry.storage as storage
store = storage.load()
images_cache = {}
ancestry_cache = {}
dry_run = True
def warning(msg):
print('# Warning: ' + msg, file=sys.stderr)
def get_image_parent(image_id):
if image_id in images_cache:
return images_cache[image_id]
image_json = store.image_json_path(image_id)
parent_id = None
try:
# Note(dmp): unicode patch
info = store.get_json(image_json)
if info['id'] != image_id:
warning('image_id != json image_id for image_id: ' + image_id)
parent_id = info.get('parent')
except exceptions.FileNotFoundError:
warning('graph is broken for image_id: {0}'.format(image_id))
images_cache[image_id] = parent_id
return parent_id
def create_image_ancestry(image_id):
global ancestry_cache
if image_id in ancestry_cache:
# We already generated the ancestry for that one
return
ancestry = [image_id]
parent_id = image_id
while True:
parent_id = get_image_parent(parent_id)
if not parent_id:
break
ancestry.append(parent_id)
create_image_ancestry(parent_id)
ancestry_path = store.image_ancestry_path(image_id)
if dry_run is False:
if not store.exists(ancestry_path):
store.put_content(ancestry_path, json.dumps(ancestry))
ancestry_cache[image_id] = True
print('Generated ancestry (size: {0}) '
'for image_id: {1}'.format(len(ancestry), image_id))
def resolve_all_tags():
for namespace in store.list_directory(store.repositories):
for repos in store.list_directory(namespace):
try:
for tag in store.list_directory(repos):
fname = tag.split('/').pop()
if not fname.startswith('tag_'):
continue
yield store.get_content(tag)
except exceptions.FileNotFoundError:
pass
def compute_image_checksum(image_id, json_data):
layer_path = store.image_layer_path(image_id)
if not store.exists(layer_path):
warning('{0} is broken (no layer)'.format(image_id))
return
print('Writing checksum for {0}'.format(image_id))
if dry_run:
return
h = hashlib.sha256(json_data + '\n')
for buf in store.stream_read(layer_path):
h.update(buf)
checksum = 'sha256:{0}'.format(h.hexdigest())
checksum_path = store.image_checksum_path(image_id)
store.put_content(checksum_path, checksum)
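# Illustrative sketch (not part of the original script): the checksum written
# above is sha256 over the image JSON plus a trailing newline, followed by the
# raw layer bytes. A standalone equivalent, assuming the layer fits in memory:
def example_checksum(json_data, layer_bytes):
    h = hashlib.sha256(json_data + '\n')
    h.update(layer_bytes)
    return 'sha256:{0}'.format(h.hexdigest())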
def load_image_json(image_id):
try:
json_path = store.image_json_path(image_id)
json_data = store.get_content(json_path)
# Note(dmp): unicode patch
info = json.loads(json_data.decode('utf8'))
if image_id != info['id']:
warning('{0} is broken (json\'s id mismatch)'.format(image_id))
return
return json_data
except (IOError, exceptions.FileNotFoundError, json.JSONDecodeError):
warning('{0} is broken (invalid json)'.format(image_id))
def compute_missing_checksums():
for image in store.list_directory(store.images):
image_id = image.split('/').pop()
if image_id not in ancestry_cache:
warning('{0} is orphan'.format(image_id))
json_data = load_image_json(image_id)
if not json_data:
continue
checksum_path = store.image_checksum_path(image_id)
if store.exists(checksum_path):
# Checksum already there, skipping
continue
compute_image_checksum(image_id, json_data)
if __name__ == '__main__':
if len(sys.argv) > 1 and sys.argv[1] == '--seriously':
dry_run = False
for image_id in resolve_all_tags():
create_image_ancestry(image_id)
compute_missing_checksums()
if dry_run:
print('-------')
        print(r'/!\ No modification has been made (dry-run)')
        print(r'/!\ In order to apply the changes, re-run with:')
print('$ {0} --seriously'.format(sys.argv[0]))
else:
print('# Changes applied.')
|
apache-2.0
|
upgrades-migrations/preupgrade-assistant
|
tests/test_generation.py
|
2
|
2933
|
from __future__ import unicode_literals
import unittest
import tempfile
import shutil
import os
from glob import glob
from preupg.xmlgen.compose import XCCDFCompose, ComposeXML
from preupg.utils import FileHelper
from preupg import settings
try:
import base
except ImportError:
import tests.base as base
FOO_DIR = 'FOOBAR6_7'
FOO_RESULTS = FOO_DIR + settings.results_postfix
class TestContentGenerate(base.TestCase):
dir_name = None
result_dir = None
def setUp(self):
self.temp_dir = tempfile.mkdtemp(prefix='preupgrade', dir='/tmp')
self.dir_name = os.path.join(os.getcwd(), 'tests', FOO_DIR)
self.result_dir = os.path.join(self.temp_dir, 'tests', FOO_RESULTS)
shutil.copytree(self.dir_name, os.path.join(self.temp_dir, FOO_DIR))
self.dir_name = os.path.join(self.temp_dir, FOO_DIR)
def tearDown(self):
shutil.rmtree(self.temp_dir)
for d, subd, file_name in os.walk(self.dir_name):
group_xml = [x for x in file_name if x == 'group.xml']
if group_xml:
os.unlink(os.path.join(d, group_xml[0]))
def test_compose(self):
ComposeXML().collect_group_xmls(self.dir_name, self.dir_name)
for subdir in glob(os.path.join(self.dir_name, "*/")):
self.assertTrue(os.path.exists(os.path.join(subdir, 'group.xml')))
self.assertFalse(os.path.exists(
os.path.join(subdir, settings.all_xccdf_xml_filename)))
class TestGlobalContent(base.TestCase):
temp_dir = None
dir_name = None
result_dir = None
def setUp(self):
self.temp_dir = tempfile.mkdtemp(prefix='preupgrade', dir='/tmp')
self.dir_name = os.path.join(os.getcwd(), 'tests', FOO_DIR)
self.result_dir = os.path.join(self.temp_dir, FOO_DIR + '-results')
shutil.copytree(self.dir_name, os.path.join(self.temp_dir, FOO_DIR))
self.data_dir_orig = settings.data_dir
settings.data_dir = os.path.join(os.getcwd(), "data")
def tearDown(self):
shutil.rmtree(self.temp_dir)
settings.data_dir = self.data_dir_orig
def test_final_compose(self):
dir_name = os.path.join(self.temp_dir, FOO_DIR)
ComposeXML().collect_group_xmls(dir_name, dir_name)
xccdf_compose = XCCDFCompose(os.path.join(self.temp_dir, FOO_DIR))
xccdf_compose.generate_xml()
all_xccdf = os.path.join(self.result_dir,
settings.all_xccdf_xml_filename)
self.assertTrue(os.path.exists(all_xccdf))
dummy_lines = FileHelper.get_file_content(all_xccdf, 'rb')
def suite():
loader = unittest.TestLoader()
suite = unittest.TestSuite()
suite.addTest(loader.loadTestsFromTestCase(TestContentGenerate))
suite.addTest(loader.loadTestsFromTestCase(TestGlobalContent))
return suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=3).run(suite())
|
gpl-3.0
|
drawks/ansible
|
lib/ansible/modules/cloud/smartos/smartos_image_facts.py
|
58
|
2857
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Adam Števko <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: smartos_image_facts
short_description: Get SmartOS image details.
description:
- Retrieve facts about all installed images on SmartOS. Facts will be
      inserted into the ansible_facts key.
version_added: "2.2"
author: Adam Števko (@xen0l)
options:
filters:
description:
- Criteria for selecting image. Can be any value from image
manifest and 'published_date', 'published', 'source', 'clones',
              and 'size'. More information can be found at U(https://smartos.org/man/1m/imgadm)
under 'imgadm list'.
'''
EXAMPLES = '''
# Return facts about all installed images.
- smartos_image_facts:
# Return all private active Linux images.
- smartos_image_facts: filters="os=linux state=active public=false"
# Show how many clones every image has.
- smartos_image_facts:
- debug: msg="{{ smartos_images[item]['name'] }}-{{smartos_images[item]['version'] }}
has {{ smartos_images[item]['clones'] }} VM(s)"
with_items: "{{ smartos_images.keys() }}"
'''
RETURN = '''
# this module returns ansible_facts
'''
import json
from ansible.module_utils.basic import AnsibleModule
class ImageFacts(object):
def __init__(self, module):
self.module = module
self.filters = module.params['filters']
def return_all_installed_images(self):
cmd = [self.module.get_bin_path('imgadm')]
cmd.append('list')
cmd.append('-j')
if self.filters:
cmd.append(self.filters)
(rc, out, err) = self.module.run_command(cmd)
if rc != 0:
self.module.exit_json(
msg='Failed to get all installed images', stderr=err)
images = json.loads(out)
result = {}
for image in images:
result[image['manifest']['uuid']] = image['manifest']
# Merge additional attributes with the image manifest.
for attrib in ['clones', 'source', 'zpool']:
result[image['manifest']['uuid']][attrib] = image[attrib]
return result
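# Illustrative sketch (not part of the original module): shape of the dict
# returned by return_all_installed_images(), keyed by image UUID and holding
# the manifest merged with 'clones', 'source' and 'zpool'. Values are made up.
EXAMPLE_IMAGE_FACTS = {
    '70e3ae72-96b6-11e6-9056-9737fd4d0764': {
        'uuid': '70e3ae72-96b6-11e6-9056-9737fd4d0764',
        'name': 'base-64-lts',
        'version': '16.4.1',
        'os': 'smartos',
        'clones': 2,
        'source': 'https://images.joyent.com',
        'zpool': 'zones',
    },
}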
def main():
module = AnsibleModule(
argument_spec=dict(
filters=dict(default=None),
),
supports_check_mode=False,
)
image_facts = ImageFacts(module)
data = dict(smartos_images=image_facts.return_all_installed_images())
module.exit_json(ansible_facts=data)
if __name__ == '__main__':
main()
|
gpl-3.0
|
AlexanderKaluzhny/instanotifier
|
instanotifier/feedsource/tasks.py
|
1
|
1090
|
import logging
from celery import chain, shared_task
from instanotifier.fetcher.main import fetch
from instanotifier.parser.main import parse_and_save
from instanotifier.publisher.tasks import publish
from instanotifier.feedsource.models import FeedSource
@shared_task
def consume_feed(feedsource_pk):
feed_source = FeedSource.objects.get(pk=feedsource_pk)
fetched_feed = fetch(feed_source.url)
saved_pks = parse_and_save(feed=fetched_feed, feed_source=feed_source)
logging.info(f"Fetched {len(saved_pks)} items from `{feed_source.name}`.")
return saved_pks
@shared_task
def process_feed(feedsource_pk):
task_flow = chain(
consume_feed.s(feedsource_pk=feedsource_pk),
publish.s(feedsource_pk=feedsource_pk),
)
task_flow.delay()
@shared_task
def fetch_all_sources():
feedsource_pks = list(FeedSource.objects.enabled().values_list("pk", flat=True))
for idx, pk in enumerate(feedsource_pks):
process_feed.apply_async(kwargs=dict(feedsource_pk=pk), countdown=idx * 60)
return {"fetch_started": len(feedsource_pks)}
|
mit
|
an7oine/WinVHS
|
Cygwin/lib/python2.7/threading.py
|
15
|
47210
|
"""Thread module emulating a subset of Java's threading model."""
import sys as _sys
try:
import thread
except ImportError:
del _sys.modules[__name__]
raise
import warnings
from collections import deque as _deque
from time import time as _time, sleep as _sleep
from traceback import format_exc as _format_exc
# Note regarding PEP 8 compliant aliases
# This threading model was originally inspired by Java, and inherited
# the convention of camelCase function and method names from that
# language. While those names are not in any imminent danger of being
# deprecated, starting with Python 2.6, the module now provides a
# PEP 8 compliant alias for any such method name.
# Using the new PEP 8 compliant names also facilitates substitution
# with the multiprocessing module, which doesn't provide the old
# Java inspired names.
# Rename some stuff so "from threading import *" is safe
__all__ = ['activeCount', 'active_count', 'Condition', 'currentThread',
'current_thread', 'enumerate', 'Event',
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Thread',
'Timer', 'setprofile', 'settrace', 'local', 'stack_size']
_start_new_thread = thread.start_new_thread
_allocate_lock = thread.allocate_lock
_get_ident = thread.get_ident
ThreadError = thread.error
del thread
# sys.exc_clear is used to work around the fact that except blocks
# don't fully clear the exception until 3.0.
warnings.filterwarnings('ignore', category=DeprecationWarning,
module='threading', message='sys.exc_clear')
# Debug support (adapted from ihooks.py).
# All the major classes here derive from _Verbose. We force that to
# be a new-style class so that all the major classes here are new-style.
# This helps debugging (type(instance) is more revealing for instances
# of new-style classes).
_VERBOSE = False
if __debug__:
class _Verbose(object):
def __init__(self, verbose=None):
if verbose is None:
verbose = _VERBOSE
self.__verbose = verbose
def _note(self, format, *args):
if self.__verbose:
format = format % args
# Issue #4188: calling current_thread() can incur an infinite
# recursion if it has to create a DummyThread on the fly.
ident = _get_ident()
try:
name = _active[ident].name
except KeyError:
name = "<OS thread %d>" % ident
format = "%s: %s\n" % (name, format)
_sys.stderr.write(format)
else:
# Disable this when using "python -O"
class _Verbose(object):
def __init__(self, verbose=None):
pass
def _note(self, *args):
pass
# Support for profile and trace hooks
_profile_hook = None
_trace_hook = None
def setprofile(func):
"""Set a profile function for all threads started from the threading module.
The func will be passed to sys.setprofile() for each thread, before its
run() method is called.
"""
global _profile_hook
_profile_hook = func
def settrace(func):
"""Set a trace function for all threads started from the threading module.
The func will be passed to sys.settrace() for each thread, before its run()
method is called.
"""
global _trace_hook
_trace_hook = func
# Synchronization classes
Lock = _allocate_lock
def RLock(*args, **kwargs):
"""Factory function that returns a new reentrant lock.
A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it again
without blocking; the thread must release it once for each time it has
acquired it.
"""
return _RLock(*args, **kwargs)
class _RLock(_Verbose):
"""A reentrant lock must be released by the thread that acquired it. Once a
thread has acquired a reentrant lock, the same thread may acquire it
again without blocking; the thread must release it once for each time it
has acquired it.
"""
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__block = _allocate_lock()
self.__owner = None
self.__count = 0
def __repr__(self):
owner = self.__owner
try:
owner = _active[owner].name
except KeyError:
pass
return "<%s owner=%r count=%d>" % (
self.__class__.__name__, owner, self.__count)
def acquire(self, blocking=1):
"""Acquire a lock, blocking or non-blocking.
When invoked without arguments: if this thread already owns the lock,
increment the recursion level by one, and return immediately. Otherwise,
if another thread owns the lock, block until the lock is unlocked. Once
the lock is unlocked (not owned by any thread), then grab ownership, set
the recursion level to one, and return. If more than one thread is
blocked waiting until the lock is unlocked, only one at a time will be
able to grab ownership of the lock. There is no return value in this
case.
When invoked with the blocking argument set to true, do the same thing
as when called without arguments, and return true.
When invoked with the blocking argument set to false, do not block. If a
call without an argument would block, return false immediately;
otherwise, do the same thing as when called without arguments, and
return true.
"""
me = _get_ident()
if self.__owner == me:
self.__count = self.__count + 1
if __debug__:
self._note("%s.acquire(%s): recursive success", self, blocking)
return 1
rc = self.__block.acquire(blocking)
if rc:
self.__owner = me
self.__count = 1
if __debug__:
self._note("%s.acquire(%s): initial success", self, blocking)
else:
if __debug__:
self._note("%s.acquire(%s): failure", self, blocking)
return rc
__enter__ = acquire
def release(self):
"""Release a lock, decrementing the recursion level.
If after the decrement it is zero, reset the lock to unlocked (not owned
by any thread), and if any other threads are blocked waiting for the
lock to become unlocked, allow exactly one of them to proceed. If after
the decrement the recursion level is still nonzero, the lock remains
locked and owned by the calling thread.
Only call this method when the calling thread owns the lock. A
RuntimeError is raised if this method is called when the lock is
unlocked.
There is no return value.
"""
if self.__owner != _get_ident():
raise RuntimeError("cannot release un-acquired lock")
self.__count = count = self.__count - 1
if not count:
self.__owner = None
self.__block.release()
if __debug__:
self._note("%s.release(): final release", self)
else:
if __debug__:
self._note("%s.release(): non-final release", self)
def __exit__(self, t, v, tb):
self.release()
# Internal methods used by condition variables
def _acquire_restore(self, count_owner):
count, owner = count_owner
self.__block.acquire()
self.__count = count
self.__owner = owner
if __debug__:
self._note("%s._acquire_restore()", self)
def _release_save(self):
if __debug__:
self._note("%s._release_save()", self)
count = self.__count
self.__count = 0
owner = self.__owner
self.__owner = None
self.__block.release()
return (count, owner)
def _is_owned(self):
return self.__owner == _get_ident()
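# Illustrative sketch (not part of the original module): reentrant acquisition
# via the context-manager protocol. Each nested "with" raises the recursion
# level; the lock is only truly released when the outermost block exits.
def _example_rlock_usage():
    lock = RLock()
    with lock:
        with lock:              # the owning thread may re-acquire freely
            pass                # recursion level is 2 at this point
    return lock                 # fully released again here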
def Condition(*args, **kwargs):
"""Factory function that returns a new condition variable object.
A condition variable allows one or more threads to wait until they are
notified by another thread.
If the lock argument is given and not None, it must be a Lock or RLock
object, and it is used as the underlying lock. Otherwise, a new RLock object
is created and used as the underlying lock.
"""
return _Condition(*args, **kwargs)
class _Condition(_Verbose):
"""Condition variables allow one or more threads to wait until they are
notified by another thread.
"""
def __init__(self, lock=None, verbose=None):
_Verbose.__init__(self, verbose)
if lock is None:
lock = RLock()
self.__lock = lock
# Export the lock's acquire() and release() methods
self.acquire = lock.acquire
self.release = lock.release
# If the lock defines _release_save() and/or _acquire_restore(),
# these override the default implementations (which just call
# release() and acquire() on the lock). Ditto for _is_owned().
try:
self._release_save = lock._release_save
except AttributeError:
pass
try:
self._acquire_restore = lock._acquire_restore
except AttributeError:
pass
try:
self._is_owned = lock._is_owned
except AttributeError:
pass
self.__waiters = []
def __enter__(self):
return self.__lock.__enter__()
def __exit__(self, *args):
return self.__lock.__exit__(*args)
def __repr__(self):
return "<Condition(%s, %d)>" % (self.__lock, len(self.__waiters))
def _release_save(self):
self.__lock.release() # No state to save
def _acquire_restore(self, x):
self.__lock.acquire() # Ignore saved state
def _is_owned(self):
# Return True if lock is owned by current_thread.
# This method is called only if __lock doesn't have _is_owned().
if self.__lock.acquire(0):
self.__lock.release()
return False
else:
return True
def wait(self, timeout=None):
"""Wait until notified or until a timeout occurs.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method releases the underlying lock, and then blocks until it is
awakened by a notify() or notifyAll() call for the same condition
variable in another thread, or until the optional timeout occurs. Once
awakened or timed out, it re-acquires the lock and returns.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
When the underlying lock is an RLock, it is not released using its
release() method, since this may not actually unlock the lock when it
was acquired multiple times recursively. Instead, an internal interface
of the RLock class is used, which really unlocks it even when it has
been recursively acquired several times. Another internal interface is
then used to restore the recursion level when the lock is reacquired.
"""
if not self._is_owned():
raise RuntimeError("cannot wait on un-acquired lock")
waiter = _allocate_lock()
waiter.acquire()
self.__waiters.append(waiter)
saved_state = self._release_save()
try: # restore state no matter what (e.g., KeyboardInterrupt)
if timeout is None:
waiter.acquire()
if __debug__:
self._note("%s.wait(): got it", self)
else:
# Balancing act: We can't afford a pure busy loop, so we
# have to sleep; but if we sleep the whole timeout time,
# we'll be unresponsive. The scheme here sleeps very
# little at first, longer as time goes on, but never longer
# than 20 times per second (or the timeout time remaining).
endtime = _time() + timeout
delay = 0.0005 # 500 us -> initial delay of 1 ms
while True:
gotit = waiter.acquire(0)
if gotit:
break
remaining = endtime - _time()
if remaining <= 0:
break
delay = min(delay * 2, remaining, .05)
_sleep(delay)
if not gotit:
if __debug__:
self._note("%s.wait(%s): timed out", self, timeout)
try:
self.__waiters.remove(waiter)
except ValueError:
pass
else:
if __debug__:
self._note("%s.wait(%s): got it", self, timeout)
finally:
self._acquire_restore(saved_state)
def notify(self, n=1):
"""Wake up one or more threads waiting on this condition, if any.
If the calling thread has not acquired the lock when this method is
called, a RuntimeError is raised.
This method wakes up at most n of the threads waiting for the condition
variable; it is a no-op if no threads are waiting.
"""
if not self._is_owned():
raise RuntimeError("cannot notify on un-acquired lock")
__waiters = self.__waiters
waiters = __waiters[:n]
if not waiters:
if __debug__:
self._note("%s.notify(): no waiters", self)
return
self._note("%s.notify(): notifying %d waiter%s", self, n,
n!=1 and "s" or "")
for waiter in waiters:
waiter.release()
try:
__waiters.remove(waiter)
except ValueError:
pass
def notifyAll(self):
"""Wake up all threads waiting on this condition.
If the calling thread has not acquired the lock when this method
is called, a RuntimeError is raised.
"""
self.notify(len(self.__waiters))
notify_all = notifyAll
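# Illustrative sketch (not part of the original module): the canonical
# wait/notify pairing around a shared work list. The "items" queue and the
# producer/consumer helpers are made up for demonstration.
def _example_condition_usage():
    cv = Condition()
    items = []
    def producer(item):
        with cv:
            items.append(item)
            cv.notify()             # wake one waiting consumer, if any
    def consumer():
        with cv:
            while not items:        # always re-check the predicate in a loop
                cv.wait()
            return items.pop(0)
    return producer, consumer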
def Semaphore(*args, **kwargs):
"""A factory function that returns a new semaphore.
Semaphores manage a counter representing the number of release() calls minus
the number of acquire() calls, plus an initial value. The acquire() method
blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
return _Semaphore(*args, **kwargs)
class _Semaphore(_Verbose):
"""Semaphores manage a counter representing the number of release() calls
minus the number of acquire() calls, plus an initial value. The acquire()
method blocks if necessary until it can return without making the counter
negative. If not given, value defaults to 1.
"""
# After Tim Peters' semaphore class, but not quite the same (no maximum)
def __init__(self, value=1, verbose=None):
if value < 0:
raise ValueError("semaphore initial value must be >= 0")
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__value = value
def acquire(self, blocking=1):
"""Acquire a semaphore, decrementing the internal counter by one.
When invoked without arguments: if the internal counter is larger than
zero on entry, decrement it by one and return immediately. If it is zero
on entry, block, waiting until some other thread has called release() to
make it larger than zero. This is done with proper interlocking so that
if multiple acquire() calls are blocked, release() will wake exactly one
of them up. The implementation may pick one at random, so the order in
which blocked threads are awakened should not be relied on. There is no
return value in this case.
When invoked with blocking set to true, do the same thing as when called
without arguments, and return true.
When invoked with blocking set to false, do not block. If a call without
an argument would block, return false immediately; otherwise, do the
same thing as when called without arguments, and return true.
"""
rc = False
with self.__cond:
while self.__value == 0:
if not blocking:
break
if __debug__:
self._note("%s.acquire(%s): blocked waiting, value=%s",
self, blocking, self.__value)
self.__cond.wait()
else:
self.__value = self.__value - 1
if __debug__:
self._note("%s.acquire: success, value=%s",
self, self.__value)
rc = True
return rc
__enter__ = acquire
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
"""
with self.__cond:
self.__value = self.__value + 1
if __debug__:
self._note("%s.release: success, value=%s",
self, self.__value)
self.__cond.notify()
def __exit__(self, t, v, tb):
self.release()
def BoundedSemaphore(*args, **kwargs):
"""A factory function that returns a new bounded semaphore.
A bounded semaphore checks to make sure its current value doesn't exceed its
initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
If the semaphore is released too many times it's a sign of a bug. If not
given, value defaults to 1.
Like regular semaphores, bounded semaphores manage a counter representing
the number of release() calls minus the number of acquire() calls, plus an
initial value. The acquire() method blocks if necessary until it can return
without making the counter negative. If not given, value defaults to 1.
"""
return _BoundedSemaphore(*args, **kwargs)
class _BoundedSemaphore(_Semaphore):
"""A bounded semaphore checks to make sure its current value doesn't exceed
its initial value. If it does, ValueError is raised. In most situations
semaphores are used to guard resources with limited capacity.
"""
def __init__(self, value=1, verbose=None):
_Semaphore.__init__(self, value, verbose)
self._initial_value = value
def release(self):
"""Release a semaphore, incrementing the internal counter by one.
When the counter is zero on entry and another thread is waiting for it
to become larger than zero again, wake up that thread.
If the number of releases exceeds the number of acquires,
raise a ValueError.
"""
with self._Semaphore__cond:
if self._Semaphore__value >= self._initial_value:
raise ValueError("Semaphore released too many times")
self._Semaphore__value += 1
self._Semaphore__cond.notify()
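# Illustrative sketch (not part of the original module): a bounded semaphore
# guarding a pool of three resource slots. An unbalanced extra release would
# raise ValueError, which surfaces acquire/release pairing bugs early.
def _example_bounded_semaphore():
    pool = BoundedSemaphore(3)
    pool.acquire()              # take one of the three slots
    pool.release()              # give it back; a further release would raise
    return pool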
def Event(*args, **kwargs):
"""A factory function that returns a new event.
Events manage a flag that can be set to true with the set() method and reset
to false with the clear() method. The wait() method blocks until the flag is
true.
"""
return _Event(*args, **kwargs)
class _Event(_Verbose):
"""A factory function that returns a new event object. An event manages a
flag that can be set to true with the set() method and reset to false
with the clear() method. The wait() method blocks until the flag is true.
"""
# After Tim Peters' event class (without is_posted())
def __init__(self, verbose=None):
_Verbose.__init__(self, verbose)
self.__cond = Condition(Lock())
self.__flag = False
def _reset_internal_locks(self):
# private! called by Thread._reset_internal_locks by _after_fork()
self.__cond.__init__()
def isSet(self):
'Return true if and only if the internal flag is true.'
return self.__flag
is_set = isSet
def set(self):
"""Set the internal flag to true.
All threads waiting for the flag to become true are awakened. Threads
that call wait() once the flag is true will not block at all.
"""
self.__cond.acquire()
try:
self.__flag = True
self.__cond.notify_all()
finally:
self.__cond.release()
def clear(self):
"""Reset the internal flag to false.
Subsequently, threads calling wait() will block until set() is called to
set the internal flag to true again.
"""
self.__cond.acquire()
try:
self.__flag = False
finally:
self.__cond.release()
def wait(self, timeout=None):
"""Block until the internal flag is true.
If the internal flag is true on entry, return immediately. Otherwise,
block until another thread calls set() to set the flag to true, or until
the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof).
This method returns the internal flag on exit, so it will always return
True except if a timeout is given and the operation times out.
"""
self.__cond.acquire()
try:
if not self.__flag:
self.__cond.wait(timeout)
return self.__flag
finally:
self.__cond.release()
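# Illustrative sketch (not part of the original module): a one-shot gate.
# Worker threads block in wait() until some controlling thread calls set().
def _example_event_gate(timeout=None):
    ready = Event()
    ready.set()                 # control side: wake every current waiter
    return ready.wait(timeout)  # worker side: returns the flag (True here)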
# Helper to generate new thread names
_counter = 0
def _newname(template="Thread-%d"):
global _counter
_counter = _counter + 1
return template % _counter
# Active thread administration
_active_limbo_lock = _allocate_lock()
_active = {} # maps thread id to Thread object
_limbo = {}
# Main class for threads
class Thread(_Verbose):
"""A class that represents a thread of control.
This class can be safely subclassed in a limited fashion.
"""
__initialized = False
# Need to store a reference to sys.exc_info for printing
# out exceptions when a thread tries to use a global var. during interp.
# shutdown and thus raises an exception about trying to perform some
# operation on/with a NoneType
__exc_info = _sys.exc_info
# Keep sys.exc_clear too to clear the exception just before
# allowing .join() to return.
__exc_clear = _sys.exc_clear
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None, verbose=None):
"""This constructor should always be called with keyword arguments. Arguments are:
*group* should be None; reserved for future extension when a ThreadGroup
class is implemented.
*target* is the callable object to be invoked by the run()
method. Defaults to None, meaning nothing is called.
*name* is the thread name. By default, a unique name is constructed of
the form "Thread-N" where N is a small decimal number.
*args* is the argument tuple for the target invocation. Defaults to ().
*kwargs* is a dictionary of keyword arguments for the target
invocation. Defaults to {}.
If a subclass overrides the constructor, it must make sure to invoke
the base class constructor (Thread.__init__()) before doing anything
else to the thread.
"""
assert group is None, "group argument must be None for now"
_Verbose.__init__(self, verbose)
if kwargs is None:
kwargs = {}
self.__target = target
self.__name = str(name or _newname())
self.__args = args
self.__kwargs = kwargs
self.__daemonic = self._set_daemon()
self.__ident = None
self.__started = Event()
self.__stopped = False
self.__block = Condition(Lock())
self.__initialized = True
# sys.stderr is not stored in the class like
# sys.exc_info since it can be changed between instances
self.__stderr = _sys.stderr
def _reset_internal_locks(self):
# private! Called by _after_fork() to reset our internal locks as
# they may be in an invalid state leading to a deadlock or crash.
if hasattr(self, '_Thread__block'): # DummyThread deletes self.__block
self.__block.__init__()
self.__started._reset_internal_locks()
@property
def _block(self):
# used by a unittest
return self.__block
def _set_daemon(self):
# Overridden in _MainThread and _DummyThread
return current_thread().daemon
def __repr__(self):
assert self.__initialized, "Thread.__init__() was not called"
status = "initial"
if self.__started.is_set():
status = "started"
if self.__stopped:
status = "stopped"
if self.__daemonic:
status += " daemon"
if self.__ident is not None:
status += " %s" % self.__ident
return "<%s(%s, %s)>" % (self.__class__.__name__, self.__name, status)
def start(self):
"""Start the thread's activity.
It must be called at most once per thread object. It arranges for the
object's run() method to be invoked in a separate thread of control.
This method will raise a RuntimeError if called more than once on the
same thread object.
"""
if not self.__initialized:
raise RuntimeError("thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("threads can only be started once")
if __debug__:
self._note("%s.start(): starting thread", self)
with _active_limbo_lock:
_limbo[self] = self
try:
_start_new_thread(self.__bootstrap, ())
except Exception:
with _active_limbo_lock:
del _limbo[self]
raise
self.__started.wait()
def run(self):
"""Method representing the thread's activity.
You may override this method in a subclass. The standard run() method
invokes the callable object passed to the object's constructor as the
target argument, if any, with sequential and keyword arguments taken
from the args and kwargs arguments, respectively.
"""
try:
if self.__target:
self.__target(*self.__args, **self.__kwargs)
finally:
# Avoid a refcycle if the thread is running a function with
# an argument that has a member that points to the thread.
del self.__target, self.__args, self.__kwargs
def __bootstrap(self):
# Wrapper around the real bootstrap code that ignores
# exceptions during interpreter cleanup. Those typically
# happen when a daemon thread wakes up at an unfortunate
# moment, finds the world around it destroyed, and raises some
# random exception *** while trying to report the exception in
# __bootstrap_inner() below ***. Those random exceptions
# don't help anybody, and they confuse users, so we suppress
# them. We suppress them only when it appears that the world
# indeed has already been destroyed, so that exceptions in
# __bootstrap_inner() during normal business hours are properly
# reported. Also, we only suppress them for daemonic threads;
# if a non-daemonic encounters this, something else is wrong.
try:
self.__bootstrap_inner()
except:
if self.__daemonic and _sys is None:
return
raise
def _set_ident(self):
self.__ident = _get_ident()
def __bootstrap_inner(self):
try:
self._set_ident()
self.__started.set()
with _active_limbo_lock:
_active[self.__ident] = self
del _limbo[self]
if __debug__:
self._note("%s.__bootstrap(): thread started", self)
if _trace_hook:
self._note("%s.__bootstrap(): registering trace hook", self)
_sys.settrace(_trace_hook)
if _profile_hook:
self._note("%s.__bootstrap(): registering profile hook", self)
_sys.setprofile(_profile_hook)
try:
self.run()
except SystemExit:
if __debug__:
self._note("%s.__bootstrap(): raised SystemExit", self)
except:
if __debug__:
self._note("%s.__bootstrap(): unhandled exception", self)
# If sys.stderr is no more (most likely from interpreter
# shutdown) use self.__stderr. Otherwise still use sys (as in
# _sys) in case sys.stderr was redefined since the creation of
# self.
if _sys:
_sys.stderr.write("Exception in thread %s:\n%s\n" %
(self.name, _format_exc()))
else:
# Do the best job possible w/o a huge amt. of code to
# approximate a traceback (code ideas from
# Lib/traceback.py)
exc_type, exc_value, exc_tb = self.__exc_info()
try:
print>>self.__stderr, (
"Exception in thread " + self.name +
" (most likely raised during interpreter shutdown):")
print>>self.__stderr, (
"Traceback (most recent call last):")
while exc_tb:
print>>self.__stderr, (
' File "%s", line %s, in %s' %
(exc_tb.tb_frame.f_code.co_filename,
exc_tb.tb_lineno,
exc_tb.tb_frame.f_code.co_name))
exc_tb = exc_tb.tb_next
print>>self.__stderr, ("%s: %s" % (exc_type, exc_value))
# Make sure that exc_tb gets deleted since it is a memory
# hog; deleting everything else is just for thoroughness
finally:
del exc_type, exc_value, exc_tb
else:
if __debug__:
self._note("%s.__bootstrap(): normal return", self)
finally:
# Prevent a race in
# test_threading.test_no_refcycle_through_target when
# the exception keeps the target alive past when we
# assert that it's dead.
self.__exc_clear()
finally:
with _active_limbo_lock:
self.__stop()
try:
# We don't call self.__delete() because it also
# grabs _active_limbo_lock.
del _active[_get_ident()]
except:
pass
def __stop(self):
# DummyThreads delete self.__block, but they have no waiters to
# notify anyway (join() is forbidden on them).
if not hasattr(self, '_Thread__block'):
return
self.__block.acquire()
self.__stopped = True
self.__block.notify_all()
self.__block.release()
def __delete(self):
"Remove current thread from the dict of currently running threads."
# Notes about running with dummy_thread:
#
# Must take care to not raise an exception if dummy_thread is being
# used (and thus this module is being used as an instance of
# dummy_threading). dummy_thread.get_ident() always returns -1 since
# there is only one thread if dummy_thread is being used. Thus
# len(_active) is always <= 1 here, and any Thread instance created
# overwrites the (if any) thread currently registered in _active.
#
# An instance of _MainThread is always created by 'threading'. This
# gets overwritten the instant an instance of Thread is created; both
# threads return -1 from dummy_thread.get_ident() and thus have the
# same key in the dict. So when the _MainThread instance created by
# 'threading' tries to clean itself up when atexit calls this method
# it gets a KeyError if another Thread instance was created.
#
# This all means that KeyError from trying to delete something from
# _active if dummy_threading is being used is a red herring. But
# since it isn't if dummy_threading is *not* being used then don't
# hide the exception.
try:
with _active_limbo_lock:
del _active[_get_ident()]
# There must not be any python code between the previous line
# and after the lock is released. Otherwise a tracing function
# could try to acquire the lock again in the same thread, (in
# current_thread()), and would block.
except KeyError:
if 'dummy_threading' not in _sys.modules:
raise
def join(self, timeout=None):
"""Wait until the thread terminates.
This blocks the calling thread until the thread whose join() method is
called terminates -- either normally or through an unhandled exception
or until the optional timeout occurs.
When the timeout argument is present and not None, it should be a
floating point number specifying a timeout for the operation in seconds
(or fractions thereof). As join() always returns None, you must call
isAlive() after join() to decide whether a timeout happened -- if the
thread is still alive, the join() call timed out.
When the timeout argument is not present or None, the operation will
block until the thread terminates.
A thread can be join()ed many times.
join() raises a RuntimeError if an attempt is made to join the current
thread as that would cause a deadlock. It is also an error to join() a
thread before it has been started and attempts to do so raises the same
exception.
"""
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if not self.__started.is_set():
raise RuntimeError("cannot join thread before it is started")
if self is current_thread():
raise RuntimeError("cannot join current thread")
if __debug__:
if not self.__stopped:
self._note("%s.join(): waiting until thread stops", self)
self.__block.acquire()
try:
if timeout is None:
while not self.__stopped:
self.__block.wait()
if __debug__:
self._note("%s.join(): thread stopped", self)
else:
deadline = _time() + timeout
while not self.__stopped:
delay = deadline - _time()
if delay <= 0:
if __debug__:
self._note("%s.join(): timed out", self)
break
self.__block.wait(delay)
else:
if __debug__:
self._note("%s.join(): thread stopped", self)
finally:
self.__block.release()
@property
def name(self):
"""A string used for identification purposes only.
It has no semantics. Multiple threads may be given the same name. The
initial name is set by the constructor.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__name
@name.setter
def name(self, name):
assert self.__initialized, "Thread.__init__() not called"
self.__name = str(name)
@property
def ident(self):
"""Thread identifier of this thread or None if it has not been started.
This is a nonzero integer. See the thread.get_ident() function. Thread
identifiers may be recycled when a thread exits and another thread is
created. The identifier is available even after the thread has exited.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__ident
def isAlive(self):
"""Return whether the thread is alive.
This method returns True just before the run() method starts until just
after the run() method terminates. The module function enumerate()
returns a list of all alive threads.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__started.is_set() and not self.__stopped
is_alive = isAlive
@property
def daemon(self):
"""A boolean value indicating whether this thread is a daemon thread (True) or not (False).
This must be set before start() is called, otherwise RuntimeError is
raised. Its initial value is inherited from the creating thread; the
main thread is not a daemon thread and therefore all threads created in
the main thread default to daemon = False.
The entire Python program exits when no alive non-daemon threads are
left.
"""
assert self.__initialized, "Thread.__init__() not called"
return self.__daemonic
@daemon.setter
def daemon(self, daemonic):
if not self.__initialized:
raise RuntimeError("Thread.__init__() not called")
if self.__started.is_set():
raise RuntimeError("cannot set daemon status of active thread");
self.__daemonic = daemonic
def isDaemon(self):
return self.daemon
def setDaemon(self, daemonic):
self.daemon = daemonic
def getName(self):
return self.name
def setName(self, name):
self.name = name
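# Illustrative sketch (not part of the original module): the common
# target/args constructor pattern; the worker function below is made up.
def _example_thread_usage():
    results = []
    def worker(n):
        results.append(n * n)
    t = Thread(target=worker, args=(3,), name="SquareWorker")
    t.start()
    t.join()
    return results              # [9] once the worker thread has finished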
# The timer class was contributed by Itamar Shtull-Trauring
def Timer(*args, **kwargs):
"""Factory function to create a Timer object.
Timers call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
return _Timer(*args, **kwargs)
class _Timer(Thread):
"""Call a function after a specified number of seconds:
t = Timer(30.0, f, args=[], kwargs={})
t.start()
t.cancel() # stop the timer's action if it's still waiting
"""
def __init__(self, interval, function, args=[], kwargs={}):
Thread.__init__(self)
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.finished = Event()
def cancel(self):
"""Stop the timer if it hasn't finished yet"""
self.finished.set()
def run(self):
self.finished.wait(self.interval)
if not self.finished.is_set():
self.function(*self.args, **self.kwargs)
self.finished.set()
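# Illustrative sketch (not part of the original module): run a callback once
# after a delay, with the option to cancel before it fires.
def _example_timer_usage(callback, delay=30.0):
    t = Timer(delay, callback)
    t.start()
    # t.cancel()                # call before `delay` elapses to suppress it
    return t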
# Special thread class to represent the main thread
# This is garbage collected through an exit handler
class _MainThread(Thread):
def __init__(self):
Thread.__init__(self, name="MainThread")
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return False
def _exitfunc(self):
self._Thread__stop()
t = _pickSomeNonDaemonThread()
if t:
if __debug__:
self._note("%s: waiting for other threads", self)
while t:
t.join()
t = _pickSomeNonDaemonThread()
if __debug__:
self._note("%s: exiting", self)
self._Thread__delete()
def _pickSomeNonDaemonThread():
for t in enumerate():
if not t.daemon and t.is_alive():
return t
return None
# Dummy thread class to represent threads not started here.
# These aren't garbage collected when they die, nor can they be waited for.
# If they invoke anything in threading.py that calls current_thread(), they
# leave an entry in the _active dict forever after.
# Their purpose is to return *something* from current_thread().
# They are marked as daemon threads so we won't wait for them
# when we exit (conform previous semantics).
class _DummyThread(Thread):
def __init__(self):
Thread.__init__(self, name=_newname("Dummy-%d"))
# Thread.__block consumes an OS-level locking primitive, which
# can never be used by a _DummyThread. Since a _DummyThread
# instance is immortal, that's bad, so release this resource.
del self._Thread__block
self._Thread__started.set()
self._set_ident()
with _active_limbo_lock:
_active[_get_ident()] = self
def _set_daemon(self):
return True
def join(self, timeout=None):
assert False, "cannot join a dummy thread"
# Global API functions
def currentThread():
"""Return the current Thread object, corresponding to the caller's thread of control.
If the caller's thread of control was not created through the threading
module, a dummy thread object with limited functionality is returned.
"""
try:
return _active[_get_ident()]
except KeyError:
##print "current_thread(): no current thread for", _get_ident()
return _DummyThread()
current_thread = currentThread
def activeCount():
"""Return the number of Thread objects currently alive.
The returned count is equal to the length of the list returned by
enumerate().
"""
with _active_limbo_lock:
return len(_active) + len(_limbo)
active_count = activeCount
def _enumerate():
# Same as enumerate(), but without the lock. Internal use only.
return _active.values() + _limbo.values()
def enumerate():
"""Return a list of all Thread objects currently alive.
The list includes daemonic threads, dummy thread objects created by
current_thread(), and the main thread. It excludes terminated threads and
threads that have not yet been started.
"""
with _active_limbo_lock:
return _active.values() + _limbo.values()
from thread import stack_size
# Create the main thread object,
# and make it available for the interpreter
# (Py_Main) as threading._shutdown.
_shutdown = _MainThread()._exitfunc
# get thread-local implementation, either from the thread
# module, or from the python fallback
try:
from thread import _local as local
except ImportError:
from _threading_local import local
def _after_fork():
# This function is called by Python/ceval.c:PyEval_ReInitThreads which
# is called from PyOS_AfterFork. Here we cleanup threading module state
# that should not exist after a fork.
# Reset _active_limbo_lock, in case we forked while the lock was held
# by another (non-forked) thread. http://bugs.python.org/issue874900
global _active_limbo_lock
_active_limbo_lock = _allocate_lock()
# fork() only copied the current thread; clear references to others.
new_active = {}
current = current_thread()
with _active_limbo_lock:
for thread in _enumerate():
# Any lock/condition variable may be currently locked or in an
# invalid state, so we reinitialize them.
if hasattr(thread, '_reset_internal_locks'):
thread._reset_internal_locks()
if thread is current:
# There is only one active thread. We reset the ident to
# its new value since it can have changed.
ident = _get_ident()
thread._Thread__ident = ident
new_active[ident] = thread
else:
# All the others are already stopped.
thread._Thread__stop()
_limbo.clear()
_active.clear()
_active.update(new_active)
assert len(_active) == 1
# Self-test code
def _test():
class BoundedQueue(_Verbose):
def __init__(self, limit):
_Verbose.__init__(self)
self.mon = RLock()
self.rc = Condition(self.mon)
self.wc = Condition(self.mon)
self.limit = limit
self.queue = _deque()
def put(self, item):
self.mon.acquire()
while len(self.queue) >= self.limit:
self._note("put(%s): queue full", item)
self.wc.wait()
self.queue.append(item)
self._note("put(%s): appended, length now %d",
item, len(self.queue))
self.rc.notify()
self.mon.release()
def get(self):
self.mon.acquire()
while not self.queue:
self._note("get(): queue empty")
self.rc.wait()
item = self.queue.popleft()
self._note("get(): got %s, %d left", item, len(self.queue))
self.wc.notify()
self.mon.release()
return item
class ProducerThread(Thread):
def __init__(self, queue, quota):
Thread.__init__(self, name="Producer")
self.queue = queue
self.quota = quota
def run(self):
from random import random
counter = 0
while counter < self.quota:
counter = counter + 1
self.queue.put("%s.%d" % (self.name, counter))
_sleep(random() * 0.00001)
class ConsumerThread(Thread):
def __init__(self, queue, count):
Thread.__init__(self, name="Consumer")
self.queue = queue
self.count = count
def run(self):
while self.count > 0:
item = self.queue.get()
print item
self.count = self.count - 1
NP = 3
QL = 4
NI = 5
Q = BoundedQueue(QL)
P = []
for i in range(NP):
t = ProducerThread(Q, NI)
t.name = ("Producer-%d" % (i+1))
P.append(t)
C = ConsumerThread(Q, NI*NP)
for t in P:
t.start()
_sleep(0.000001)
C.start()
for t in P:
t.join()
C.join()
if __name__ == '__main__':
_test()
|
gpl-3.0
|
thomasboyt/zulip
|
bots/jabber_mirror.py
|
17
|
2060
|
#!/usr/bin/env python
# Copyright (C) 2014 Zulip, Inc.
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import subprocess
import os
import traceback
import signal
from zulip import RandomExponentialBackoff
def die(signal, frame):
# We actually want to exit, so run os._exit (so as not to be caught and restarted)
os._exit(1)
signal.signal(signal.SIGINT, die)
args = [os.path.join(os.path.dirname(sys.argv[0]), "jabber_mirror_backend.py")]
args.extend(sys.argv[1:])
backoff = RandomExponentialBackoff(timeout_success_equivalent=300)
while backoff.keep_going():
print "Starting Jabber mirroring bot"
try:
ret = subprocess.call(args)
except:
traceback.print_exc()
else:
if ret == 2:
# Don't try again on initial configuration errors
sys.exit(ret)
backoff.fail()
print ""
print ""
print "ERROR: The Jabber mirroring bot is unable to continue mirroring Jabber."
print "Please contact [email protected] if you need assistance."
print ""
sys.exit(1)
|
apache-2.0
|
embray/astropy_helpers
|
astropy_helpers/sphinx/ext/phantom_import.py
|
63
|
5854
|
"""
==============
phantom_import
==============
Sphinx extension to make directives from ``sphinx.ext.autodoc`` and similar
extensions to use docstrings loaded from an XML file.
This extension loads an XML file in the Pydocweb format [1] and
creates a dummy module that contains the specified docstrings. This
can be used to get the current docstrings from a Pydocweb instance
without needing to rebuild the documented module.
.. [1] http://code.google.com/p/pydocweb
"""
from __future__ import division, absolute_import, print_function
import imp, sys, compiler, types, os, inspect, re
def setup(app):
app.connect('builder-inited', initialize)
app.add_config_value('phantom_import_file', None, True)
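# Usage sketch (added; an assumed, typical Sphinx setup rather than anything
# taken from this file): the extension is listed in conf.py and pointed at a
# Pydocweb XML dump, e.g.
#     extensions = ['astropy_helpers.sphinx.ext.phantom_import']
#     phantom_import_file = 'dump.xml'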
def initialize(app):
fn = app.config.phantom_import_file
if (fn and os.path.isfile(fn)):
print("[numpydoc] Phantom importing modules from", fn, "...")
import_phantom_module(fn)
#------------------------------------------------------------------------------
# Creating 'phantom' modules from an XML description
#------------------------------------------------------------------------------
def import_phantom_module(xml_file):
"""
Insert a fake Python module to sys.modules, based on a XML file.
The XML file is expected to conform to Pydocweb DTD. The fake
module will contain dummy objects, which guarantee the following:
- Docstrings are correct.
- Class inheritance relationships are correct (if present in XML).
- Function argspec is *NOT* correct (even if present in XML).
Instead, the function signature is prepended to the function docstring.
- Class attributes are *NOT* correct; instead, they are dummy objects.
Parameters
----------
xml_file : str
Name of an XML file to read
"""
import lxml.etree as etree
object_cache = {}
tree = etree.parse(xml_file)
root = tree.getroot()
# Sort items so that
# - Base classes come before classes inherited from them
# - Modules come before their contents
all_nodes = dict([(n.attrib['id'], n) for n in root])
def _get_bases(node, recurse=False):
bases = [x.attrib['ref'] for x in node.findall('base')]
if recurse:
j = 0
while True:
try:
b = bases[j]
except IndexError: break
if b in all_nodes:
bases.extend(_get_bases(all_nodes[b]))
j += 1
return bases
type_index = ['module', 'class', 'callable', 'object']
def base_cmp(a, b):
x = cmp(type_index.index(a.tag), type_index.index(b.tag))
if x != 0: return x
if a.tag == 'class' and b.tag == 'class':
a_bases = _get_bases(a, recurse=True)
b_bases = _get_bases(b, recurse=True)
x = cmp(len(a_bases), len(b_bases))
if x != 0: return x
if a.attrib['id'] in b_bases: return -1
if b.attrib['id'] in a_bases: return 1
return cmp(a.attrib['id'].count('.'), b.attrib['id'].count('.'))
nodes = root.getchildren()
nodes.sort(base_cmp)
# Create phantom items
for node in nodes:
name = node.attrib['id']
doc = (node.text or '').decode('string-escape') + "\n"
if doc == "\n": doc = ""
# create parent, if missing
parent = name
while True:
parent = '.'.join(parent.split('.')[:-1])
if not parent: break
if parent in object_cache: break
obj = imp.new_module(parent)
object_cache[parent] = obj
sys.modules[parent] = obj
# create object
if node.tag == 'module':
obj = imp.new_module(name)
obj.__doc__ = doc
sys.modules[name] = obj
elif node.tag == 'class':
bases = [object_cache[b] for b in _get_bases(node)
if b in object_cache]
bases.append(object)
init = lambda self: None
init.__doc__ = doc
obj = type(name, tuple(bases), {'__doc__': doc, '__init__': init})
obj.__name__ = name.split('.')[-1]
elif node.tag == 'callable':
funcname = node.attrib['id'].split('.')[-1]
argspec = node.attrib.get('argspec')
if argspec:
argspec = re.sub('^[^(]*', '', argspec)
doc = "%s%s\n\n%s" % (funcname, argspec, doc)
obj = lambda: 0
obj.__argspec_is_invalid_ = True
if sys.version_info[0] >= 3:
obj.__name__ = funcname
else:
obj.func_name = funcname
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__objclass__ = object_cache[parent]
else:
class Dummy(object): pass
obj = Dummy()
obj.__name__ = name
obj.__doc__ = doc
if inspect.isclass(object_cache[parent]):
obj.__get__ = lambda: None
object_cache[name] = obj
if parent:
if inspect.ismodule(object_cache[parent]):
obj.__module__ = parent
setattr(object_cache[parent], name.split('.')[-1], obj)
# Populate items
for node in root:
obj = object_cache.get(node.attrib['id'])
if obj is None: continue
for ref in node.findall('ref'):
if node.tag == 'class':
if ref.attrib['ref'].startswith(node.attrib['id'] + '.'):
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
else:
setattr(obj, ref.attrib['name'],
object_cache.get(ref.attrib['ref']))
|
bsd-3-clause
|
kowito/django-autocomplete-light
|
autocomplete_light/tests/generic.py
|
3
|
2146
|
import unittest
from django.test import TransactionTestCase
from django.contrib.contenttypes.models import ContentType
from cities_light.models import Country, City
from gfk_autocomplete.forms import TaggedItemForm
from optionnal_gfk_autocomplete.forms import OptionnalTaggedItemForm
import autocomplete_light
class GenericModelFormTestCase(unittest.TestCase):
def setUp(self):
self.country, c = Country.objects.get_or_create(name='Countryname')
self.city, c = City.objects.get_or_create(country=self.country,
name=u'Paris')
def tearDown(self):
self.country.delete()
self.city.delete()
def test_model_form(self):
tests = (
{
'content_object': self.city,
'tag': 'foo',
'valid': True,
'form_class': TaggedItemForm,
},
{
'tag': 'bar',
'valid': False,
'form_class': TaggedItemForm,
},
{
'content_object': self.city,
'tag': 'foo',
'valid': True,
'form_class': OptionnalTaggedItemForm,
},
{
'tag': 'bar',
'valid': True,
'form_class': OptionnalTaggedItemForm,
},
)
for test in tests:
if 'data' not in test.keys():
test['data'] = {'tag': test.get('tag', None)}
if 'content_object' in test.keys():
test['data']['content_object'] = u'%s-%s' % (
ContentType.objects.get_for_model(test['content_object']).pk,
test['content_object'].pk)
form = test['form_class'](test['data'])
self.assertEqual(form.is_valid(), test['valid'])
if test['valid']:
result = form.save()
self.assertEqual(test['tag'], result.tag)
if 'content_object' in test.keys():
self.assertEqual(test['content_object'],
result.content_object)
|
mit
|
rogerthat-platform/rogerthat-ios-client
|
3rdParty/Code/3rdParty/zxing/cpp/scons/scons-local-2.0.0.final.0/SCons/__init__.py
|
34
|
1629
|
"""SCons
The main package for the SCons software construction utility.
"""
#
# Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/__init__.py 5023 2010/06/14 22:05:46 scons"
__version__ = "2.0.0.final.0"
__build__ = "r5023"
__buildsys__ = "scons-dev"
__date__ = "2010/06/14 22:05:46"
__developer__ = "scons"
# make sure compatibility is always in place
import SCons.compat
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
|
apache-2.0
|
vm03/android_kernel_asus_P024
|
tools/perf/scripts/python/syscall-counts-by-pid.py
|
11180
|
1927
|
# system call counts, by pid
# (c) 2010, Tom Zanussi <[email protected]>
# Licensed under the terms of the GNU GPL License version 2
#
# Displays system-wide system call totals, broken down by syscall.
# If a [comm] arg is specified, only syscalls called by [comm] are displayed.
import os, sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import syscall_name
usage = "perf script -s syscall-counts-by-pid.py [comm]\n";
for_comm = None
for_pid = None
if len(sys.argv) > 2:
sys.exit(usage)
if len(sys.argv) > 1:
try:
for_pid = int(sys.argv[1])
except:
for_comm = sys.argv[1]
syscalls = autodict()
def trace_begin():
print "Press control+C to stop and show the summary"
def trace_end():
print_syscall_totals()
def raw_syscalls__sys_enter(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
id, args):
if (for_comm and common_comm != for_comm) or \
(for_pid and common_pid != for_pid ):
return
try:
syscalls[common_comm][common_pid][id] += 1
except TypeError:
syscalls[common_comm][common_pid][id] = 1
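# Note (added): autodict (imported from Core) autovivifies nested dictionaries,
# so the first increment for a new (comm, pid, id) triple raises TypeError; the
# except clause above seeds that counter with 1.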
def print_syscall_totals():
if for_comm is not None:
print "\nsyscall events for %s:\n\n" % (for_comm),
else:
print "\nsyscall events by comm/pid:\n\n",
print "%-40s %10s\n" % ("comm [pid]/syscalls", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"----------"),
comm_keys = syscalls.keys()
for comm in comm_keys:
pid_keys = syscalls[comm].keys()
for pid in pid_keys:
print "\n%s [%d]\n" % (comm, pid),
id_keys = syscalls[comm][pid].keys()
for id, val in sorted(syscalls[comm][pid].iteritems(), \
key = lambda(k, v): (v, k), reverse = True):
print " %-38s %10d\n" % (syscall_name(id), val),
|
gpl-2.0
|
Bysmyyr/chromium-crosswalk
|
tools/telemetry/third_party/gsutilz/third_party/boto/tests/integration/opsworks/test_layer1.py
|
114
|
2121
|
# Copyright (c) 2013 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
from boto.exception import JSONResponseError
from boto.opsworks import connect_to_region, regions, RegionInfo
from boto.opsworks.layer1 import OpsWorksConnection
from tests.compat import unittest
class TestOpsWorksConnection(unittest.TestCase):
opsworks = True
def setUp(self):
self.api = OpsWorksConnection()
def test_describe_stacks(self):
response = self.api.describe_stacks()
self.assertIn('Stacks', response)
def test_validation_errors(self):
with self.assertRaises(JSONResponseError):
self.api.create_stack('testbotostack', 'us-east-1',
'badarn', 'badarn2')
class TestOpsWorksHelpers(unittest.TestCase):
opsworks = True
def test_regions(self):
response = regions()
self.assertIsInstance(response[0], RegionInfo)
def test_connect_to_region(self):
connection = connect_to_region('us-east-1')
self.assertIsInstance(connection, OpsWorksConnection)
|
bsd-3-clause
|
gerryhd/diabot-assistant
|
lib/python2.7/site-packages/pip/_vendor/requests/__init__.py
|
327
|
2326
|
# -*- coding: utf-8 -*-
# __
# /__) _ _ _ _ _/ _
# / ( (- (/ (/ (- _) / _)
# /
"""
Requests HTTP library
~~~~~~~~~~~~~~~~~~~~~
Requests is an HTTP library, written in Python, for human beings. Basic GET
usage:
>>> import requests
>>> r = requests.get('https://www.python.org')
>>> r.status_code
200
>>> 'Python is a programming language' in r.content
True
... or POST:
>>> payload = dict(key1='value1', key2='value2')
>>> r = requests.post('http://httpbin.org/post', data=payload)
>>> print(r.text)
{
...
"form": {
"key2": "value2",
"key1": "value1"
},
...
}
The other HTTP methods are supported - see `requests.api`. Full documentation
is at <http://python-requests.org>.
:copyright: (c) 2016 by Kenneth Reitz.
:license: Apache 2.0, see LICENSE for more details.
"""
__title__ = 'requests'
__version__ = '2.11.1'
__build__ = 0x021101
__author__ = 'Kenneth Reitz'
__license__ = 'Apache 2.0'
__copyright__ = 'Copyright 2016 Kenneth Reitz'
# Attempt to enable urllib3's SNI support, if possible
# Note: Patched by pip to prevent using the PyOpenSSL module. On Windows this
# prevents upgrading cryptography.
# try:
# from .packages.urllib3.contrib import pyopenssl
# pyopenssl.inject_into_urllib3()
# except ImportError:
# pass
import warnings
# urllib3's DependencyWarnings should be silenced.
from .packages.urllib3.exceptions import DependencyWarning
warnings.simplefilter('ignore', DependencyWarning)
from . import utils
from .models import Request, Response, PreparedRequest
from .api import request, get, head, post, patch, put, delete, options
from .sessions import session, Session
from .status_codes import codes
from .exceptions import (
RequestException, Timeout, URLRequired,
TooManyRedirects, HTTPError, ConnectionError,
FileModeWarning, ConnectTimeout, ReadTimeout
)
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
# FileModeWarnings go off per the default.
warnings.simplefilter('default', FileModeWarning, append=True)
|
gpl-3.0
|
artful-addict/rwd-starter-kit
|
node_modules/node-gyp/gyp/pylib/gyp/generator/cmake.py
|
1355
|
44604
|
# Copyright (c) 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""cmake output module
This module is under development and should be considered experimental.
This module produces cmake (2.8.8+) input as its output. One CMakeLists.txt is
created for each configuration.
This module's original purpose was to support editing in IDEs like KDevelop
which use CMake for project management. It is also possible to use CMake to
generate projects for other IDEs such as eclipse cdt and code::blocks. QtCreator
will convert the CMakeLists.txt to a code::blocks cbp for the editor to read,
but build using CMake. As a result QtCreator editor is unaware of compiler
defines. The generated CMakeLists.txt can also be used to build on Linux. There
is currently no support for building on platforms other than Linux.
The generated CMakeLists.txt should properly compile all projects. However,
there is a mismatch between gyp and cmake with regard to linking. All attempts
are made to work around this, but CMake sometimes sees -Wl,--start-group as a
library and incorrectly repeats it. As a result the output of this generator
should not be relied on for building.
When using with kdevelop, use version 4.4+. Previous versions of kdevelop will
not be able to find the header file directories described in the generated
CMakeLists.txt file.
"""
import multiprocessing
import os
import signal
import string
import subprocess
import gyp.common
generator_default_variables = {
'EXECUTABLE_PREFIX': '',
'EXECUTABLE_SUFFIX': '',
'STATIC_LIB_PREFIX': 'lib',
'STATIC_LIB_SUFFIX': '.a',
'SHARED_LIB_PREFIX': 'lib',
'SHARED_LIB_SUFFIX': '.so',
'SHARED_LIB_DIR': '${builddir}/lib.${TOOLSET}',
'LIB_DIR': '${obj}.${TOOLSET}',
'INTERMEDIATE_DIR': '${obj}.${TOOLSET}/${TARGET}/geni',
'SHARED_INTERMEDIATE_DIR': '${obj}/gen',
'PRODUCT_DIR': '${builddir}',
'RULE_INPUT_PATH': '${RULE_INPUT_PATH}',
'RULE_INPUT_DIRNAME': '${RULE_INPUT_DIRNAME}',
'RULE_INPUT_NAME': '${RULE_INPUT_NAME}',
'RULE_INPUT_ROOT': '${RULE_INPUT_ROOT}',
'RULE_INPUT_EXT': '${RULE_INPUT_EXT}',
'CONFIGURATION_NAME': '${configuration}',
}
FULL_PATH_VARS = ('${CMAKE_CURRENT_LIST_DIR}', '${builddir}', '${obj}')
generator_supports_multiple_toolsets = True
generator_wants_static_library_dependencies_adjusted = True
COMPILABLE_EXTENSIONS = {
'.c': 'cc',
'.cc': 'cxx',
'.cpp': 'cxx',
'.cxx': 'cxx',
'.s': 's', # cc
'.S': 's', # cc
}
def RemovePrefix(a, prefix):
"""Returns 'a' without 'prefix' if it starts with 'prefix'."""
return a[len(prefix):] if a.startswith(prefix) else a
def CalculateVariables(default_variables, params):
"""Calculate additional variables for use in the build (called by gyp)."""
default_variables.setdefault('OS', gyp.common.GetFlavor(params))
def Compilable(filename):
"""Return true if the file is compilable (should be in OBJS)."""
return any(filename.endswith(e) for e in COMPILABLE_EXTENSIONS)
def Linkable(filename):
"""Return true if the file is linkable (should be on the link line)."""
return filename.endswith('.o')
def NormjoinPathForceCMakeSource(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
If rel_path is an absolute path it is returned unchanged.
Otherwise it is resolved against base_path and normalized.
If the result is a relative path, it is forced to be relative to the
CMakeLists.txt.
"""
if os.path.isabs(rel_path):
return rel_path
if any([rel_path.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
# TODO: do we need to check base_path for absolute variables as well?
return os.path.join('${CMAKE_CURRENT_LIST_DIR}',
os.path.normpath(os.path.join(base_path, rel_path)))
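# Illustration (added; hypothetical paths): with base_path '../../src/foo' and
# rel_path 'bar/baz.cc' the result is
# '${CMAKE_CURRENT_LIST_DIR}/../../src/foo/bar/baz.cc'; absolute paths and
# paths already rooted at one of FULL_PATH_VARS are returned unchanged.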
def NormjoinPath(base_path, rel_path):
"""Resolves rel_path against base_path and returns the result.
TODO: what is this really used for?
If rel_path begins with '$' it is returned unchanged.
Otherwise it is resolved against base_path if relative, then normalized.
"""
if rel_path.startswith('$') and not rel_path.startswith('${configuration}'):
return rel_path
return os.path.normpath(os.path.join(base_path, rel_path))
def CMakeStringEscape(a):
"""Escapes the string 'a' for use inside a CMake string.
This means escaping
'\' otherwise it may be seen as modifying the next character
'"' otherwise it will end the string
';' otherwise the string becomes a list
The following do not need to be escaped
'#' when the lexer is in string state, this does not start a comment
The following are yet unknown
'$' generator variables (like ${obj}) must not be escaped,
but text $ should be escaped
what is wanted is to know which $ come from generator variables
"""
return a.replace('\\', '\\\\').replace(';', '\\;').replace('"', '\\"')
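# Illustration (added): for the raw input
#     he said "a;b" and C:\x
# the escaped text written into a quoted CMake string becomes
#     he said \"a\;b\" and C:\\x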
def SetFileProperty(output, source_name, property_name, values, sep):
"""Given a set of source file, sets the given property on them."""
output.write('set_source_files_properties(')
output.write(source_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetFilesProperty(output, variable, property_name, values, sep):
"""Given a set of source files, sets the given property on them."""
output.write('set_source_files_properties(')
WriteVariable(output, variable)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetTargetProperty(output, target_name, property_name, values, sep=''):
"""Given a target, sets the given property."""
output.write('set_target_properties(')
output.write(target_name)
output.write(' PROPERTIES ')
output.write(property_name)
output.write(' "')
for value in values:
output.write(CMakeStringEscape(value))
output.write(sep)
output.write('")\n')
def SetVariable(output, variable_name, value):
"""Sets a CMake variable."""
output.write('set(')
output.write(variable_name)
output.write(' "')
output.write(CMakeStringEscape(value))
output.write('")\n')
def SetVariableList(output, variable_name, values):
"""Sets a CMake variable to a list."""
if not values:
return SetVariable(output, variable_name, "")
if len(values) == 1:
return SetVariable(output, variable_name, values[0])
output.write('list(APPEND ')
output.write(variable_name)
output.write('\n "')
output.write('"\n "'.join([CMakeStringEscape(value) for value in values]))
output.write('")\n')
def UnsetVariable(output, variable_name):
"""Unsets a CMake variable."""
output.write('unset(')
output.write(variable_name)
output.write(')\n')
def WriteVariable(output, variable_name, prepend=None):
if prepend:
output.write(prepend)
output.write('${')
output.write(variable_name)
output.write('}')
class CMakeTargetType(object):
def __init__(self, command, modifier, property_modifier):
self.command = command
self.modifier = modifier
self.property_modifier = property_modifier
cmake_target_type_from_gyp_target_type = {
'executable': CMakeTargetType('add_executable', None, 'RUNTIME'),
'static_library': CMakeTargetType('add_library', 'STATIC', 'ARCHIVE'),
'shared_library': CMakeTargetType('add_library', 'SHARED', 'LIBRARY'),
'loadable_module': CMakeTargetType('add_library', 'MODULE', 'LIBRARY'),
'none': CMakeTargetType('add_custom_target', 'SOURCES', None),
}
def StringToCMakeTargetName(a):
"""Converts the given string 'a' to a valid CMake target name.
All invalid characters are replaced by '_'.
Invalid for cmake: ' ', '/', '(', ')', '"'
Invalid for make: ':'
Invalid for unknown reasons but cause failures: '.'
"""
return a.translate(string.maketrans(' /():."', '_______'))
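# Illustration (added): StringToCMakeTargetName('third_party/foo.gyp:foo (host)')
# returns 'third_party_foo_gyp_foo__host_'.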
def WriteActions(target_name, actions, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'actions' in the target.
Args:
target_name: the name of the CMake target being generated.
actions: the Gyp 'actions' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for action in actions:
action_name = StringToCMakeTargetName(action['action_name'])
action_target_name = '%s__%s' % (target_name, action_name)
inputs = action['inputs']
inputs_name = action_target_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = action['outputs']
cmake_outputs = [NormjoinPathForceCMakeSource(path_to_gyp, out)
for out in outputs]
outputs_name = action_target_name + '__output'
SetVariableList(output, outputs_name, cmake_outputs)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
if int(action.get('process_outputs_as_sources', False)):
extra_sources.extend(zip(cmake_outputs, outputs))
# add_custom_command
output.write('add_custom_command(OUTPUT ')
WriteVariable(output, outputs_name)
output.write('\n')
if len(dirs) > 0:
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(action['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write('\n')
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in action:
output.write(action['message'])
else:
output.write(action_target_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(action_target_name)
output.write('\n DEPENDS ')
WriteVariable(output, outputs_name)
output.write('\n SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n)\n')
extra_deps.append(action_target_name)
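# Shape of the generated CMake (added summary of the code above): each gyp
# action becomes an add_custom_command() that produces the action's outputs and
# an add_custom_target() named '<target>__<action>' depending on those outputs;
# the custom target is appended to extra_deps so the main target can depend on
# it.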
def NormjoinRulePathForceCMakeSource(base_path, rel_path, rule_source):
if rel_path.startswith(("${RULE_INPUT_PATH}","${RULE_INPUT_DIRNAME}")):
if any([rule_source.startswith(var) for var in FULL_PATH_VARS]):
return rel_path
return NormjoinPathForceCMakeSource(base_path, rel_path)
def WriteRules(target_name, rules, extra_sources, extra_deps,
path_to_gyp, output):
"""Write CMake for the 'rules' in the target.
Args:
target_name: the name of the CMake target being generated.
rules: the Gyp 'rules' dict for this target.
extra_sources: [(<cmake_src>, <src>)] to append with generated source files.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
for rule in rules:
rule_name = StringToCMakeTargetName(target_name + '__' + rule['rule_name'])
inputs = rule.get('inputs', [])
inputs_name = rule_name + '__input'
SetVariableList(output, inputs_name,
[NormjoinPathForceCMakeSource(path_to_gyp, dep) for dep in inputs])
outputs = rule['outputs']
var_outputs = []
for count, rule_source in enumerate(rule.get('rule_sources', [])):
action_name = rule_name + '_' + str(count)
rule_source_dirname, rule_source_basename = os.path.split(rule_source)
rule_source_root, rule_source_ext = os.path.splitext(rule_source_basename)
SetVariable(output, 'RULE_INPUT_PATH', rule_source)
SetVariable(output, 'RULE_INPUT_DIRNAME', rule_source_dirname)
SetVariable(output, 'RULE_INPUT_NAME', rule_source_basename)
SetVariable(output, 'RULE_INPUT_ROOT', rule_source_root)
SetVariable(output, 'RULE_INPUT_EXT', rule_source_ext)
# Build up a list of outputs.
# Collect the output dirs we'll need.
dirs = set(dir for dir in (os.path.dirname(o) for o in outputs) if dir)
# Create variables for the output, as 'local' variable will be unset.
these_outputs = []
for output_index, out in enumerate(outputs):
output_name = action_name + '_' + str(output_index)
SetVariable(output, output_name,
NormjoinRulePathForceCMakeSource(path_to_gyp, out,
rule_source))
if int(rule.get('process_outputs_as_sources', False)):
extra_sources.append(('${' + output_name + '}', out))
these_outputs.append('${' + output_name + '}')
var_outputs.append('${' + output_name + '}')
# add_custom_command
output.write('add_custom_command(OUTPUT\n')
for out in these_outputs:
output.write(' ')
output.write(out)
output.write('\n')
for directory in dirs:
output.write(' COMMAND ${CMAKE_COMMAND} -E make_directory ')
output.write(directory)
output.write('\n')
output.write(' COMMAND ')
output.write(gyp.common.EncodePOSIXShellList(rule['action']))
output.write('\n')
output.write(' DEPENDS ')
WriteVariable(output, inputs_name)
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
# CMAKE_CURRENT_LIST_DIR is where the CMakeLists.txt lives.
# The cwd is the current build directory.
output.write(' WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write(' COMMENT ')
if 'message' in rule:
output.write(rule['message'])
else:
output.write(action_name)
output.write('\n')
output.write(' VERBATIM\n')
output.write(')\n')
UnsetVariable(output, 'RULE_INPUT_PATH')
UnsetVariable(output, 'RULE_INPUT_DIRNAME')
UnsetVariable(output, 'RULE_INPUT_NAME')
UnsetVariable(output, 'RULE_INPUT_ROOT')
UnsetVariable(output, 'RULE_INPUT_EXT')
# add_custom_target
output.write('add_custom_target(')
output.write(rule_name)
output.write(' DEPENDS\n')
for out in var_outputs:
output.write(' ')
output.write(out)
output.write('\n')
output.write('SOURCES ')
WriteVariable(output, inputs_name)
output.write('\n')
for rule_source in rule.get('rule_sources', []):
output.write(' ')
output.write(NormjoinPath(path_to_gyp, rule_source))
output.write('\n')
output.write(')\n')
extra_deps.append(rule_name)
def WriteCopies(target_name, copies, extra_deps, path_to_gyp, output):
"""Write CMake for the 'copies' in the target.
Args:
target_name: the name of the CMake target being generated.
copies: the Gyp 'copies' dict for this target.
extra_deps: [<cmake_target>] to append with generated targets.
path_to_gyp: relative path from CMakeLists.txt being generated to
the Gyp file in which the target being generated is defined.
"""
copy_name = target_name + '__copies'
# CMake gets upset with custom targets with OUTPUT which specify no output.
have_copies = any(copy['files'] for copy in copies)
if not have_copies:
output.write('add_custom_target(')
output.write(copy_name)
output.write(')\n')
extra_deps.append(copy_name)
return
class Copy(object):
def __init__(self, ext, command):
self.cmake_inputs = []
self.cmake_outputs = []
self.gyp_inputs = []
self.gyp_outputs = []
self.ext = ext
self.inputs_name = None
self.outputs_name = None
self.command = command
file_copy = Copy('', 'copy')
dir_copy = Copy('_dirs', 'copy_directory')
for copy in copies:
files = copy['files']
destination = copy['destination']
for src in files:
path = os.path.normpath(src)
basename = os.path.split(path)[1]
dst = os.path.join(destination, basename)
copy = file_copy if os.path.basename(src) else dir_copy
copy.cmake_inputs.append(NormjoinPathForceCMakeSource(path_to_gyp, src))
copy.cmake_outputs.append(NormjoinPathForceCMakeSource(path_to_gyp, dst))
copy.gyp_inputs.append(src)
copy.gyp_outputs.append(dst)
for copy in (file_copy, dir_copy):
if copy.cmake_inputs:
copy.inputs_name = copy_name + '__input' + copy.ext
SetVariableList(output, copy.inputs_name, copy.cmake_inputs)
copy.outputs_name = copy_name + '__output' + copy.ext
SetVariableList(output, copy.outputs_name, copy.cmake_outputs)
# add_custom_command
output.write('add_custom_command(\n')
output.write('OUTPUT')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n')
for copy in (file_copy, dir_copy):
for src, dst in zip(copy.gyp_inputs, copy.gyp_outputs):
# 'cmake -E copy src dst' will create the 'dst' directory if needed.
output.write('COMMAND ${CMAKE_COMMAND} -E %s ' % copy.command)
output.write(src)
output.write(' ')
output.write(dst)
output.write("\n")
output.write('DEPENDS')
for copy in (file_copy, dir_copy):
if copy.inputs_name:
WriteVariable(output, copy.inputs_name, ' ')
output.write('\n')
output.write('WORKING_DIRECTORY ${CMAKE_CURRENT_LIST_DIR}/')
output.write(path_to_gyp)
output.write('\n')
output.write('COMMENT Copying for ')
output.write(target_name)
output.write('\n')
output.write('VERBATIM\n')
output.write(')\n')
# add_custom_target
output.write('add_custom_target(')
output.write(copy_name)
output.write('\n DEPENDS')
for copy in (file_copy, dir_copy):
if copy.outputs_name:
WriteVariable(output, copy.outputs_name, ' ')
output.write('\n SOURCES')
if file_copy.inputs_name:
WriteVariable(output, file_copy.inputs_name, ' ')
output.write('\n)\n')
extra_deps.append(copy_name)
def CreateCMakeTargetBaseName(qualified_target):
"""This is the name we would like the target to have."""
_, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_base_name = gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_base_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_base_name)
def CreateCMakeTargetFullName(qualified_target):
"""An unambiguous name for the target."""
gyp_file, gyp_target_name, gyp_target_toolset = (
gyp.common.ParseQualifiedTarget(qualified_target))
cmake_target_full_name = gyp_file + ':' + gyp_target_name
if gyp_target_toolset and gyp_target_toolset != 'target':
cmake_target_full_name += '_' + gyp_target_toolset
return StringToCMakeTargetName(cmake_target_full_name)
class CMakeNamer(object):
"""Converts Gyp target names into CMake target names.
CMake requires that target names be globally unique. One way to ensure
this is to fully qualify the names of the targets. Unfortunately, this
ends up with all targets looking like "chrome_chrome_gyp_chrome" instead
of just "chrome". If this generator were only interested in building, it
would be possible to fully qualify all target names, then create
unqualified target names which depend on all qualified targets which
should have had that name. This is more or less what the 'make' generator
does with aliases. However, one goal of this generator is to create CMake
files for use with IDEs, and fully qualified names are not as user
friendly.
Since target name collision is rare, we do the above only when required.
Toolset variants are always qualified from the base, as this is required for
building. However, it also makes sense for an IDE, as it is possible for
defines to be different.
"""
def __init__(self, target_list):
self.cmake_target_base_names_conficting = set()
cmake_target_base_names_seen = set()
for qualified_target in target_list:
cmake_target_base_name = CreateCMakeTargetBaseName(qualified_target)
if cmake_target_base_name not in cmake_target_base_names_seen:
cmake_target_base_names_seen.add(cmake_target_base_name)
else:
self.cmake_target_base_names_conficting.add(cmake_target_base_name)
def CreateCMakeTargetName(self, qualified_target):
base_name = CreateCMakeTargetBaseName(qualified_target)
if base_name in self.cmake_target_base_names_conficting:
return CreateCMakeTargetFullName(qualified_target)
return base_name
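# Illustration (added; hypothetical targets): if only one gyp file defines a
# target named 'base', CreateCMakeTargetName() returns just 'base'; if two gyp
# files both define 'base', the colliding ones fall back to the fully qualified
# form, e.g. 'path_to_base_gyp_base'.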
def WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output):
# The make generator does this always.
# TODO: It would be nice to be able to tell CMake all dependencies.
circular_libs = generator_flags.get('circular', True)
if not generator_flags.get('standalone', False):
output.write('\n#')
output.write(qualified_target)
output.write('\n')
gyp_file, _, _ = gyp.common.ParseQualifiedTarget(qualified_target)
rel_gyp_file = gyp.common.RelativePath(gyp_file, options.toplevel_dir)
rel_gyp_dir = os.path.dirname(rel_gyp_file)
# Relative path from build dir to top dir.
build_to_top = gyp.common.InvertRelativePath(build_dir, options.toplevel_dir)
# Relative path from build dir to gyp dir.
build_to_gyp = os.path.join(build_to_top, rel_gyp_dir)
path_from_cmakelists_to_gyp = build_to_gyp
spec = target_dicts.get(qualified_target, {})
config = spec.get('configurations', {}).get(config_to_use, {})
target_name = spec.get('target_name', '<missing target name>')
target_type = spec.get('type', '<missing target type>')
target_toolset = spec.get('toolset')
cmake_target_type = cmake_target_type_from_gyp_target_type.get(target_type)
if cmake_target_type is None:
print ('Target %s has unknown target type %s, skipping.' %
( target_name, target_type ) )
return
SetVariable(output, 'TARGET', target_name)
SetVariable(output, 'TOOLSET', target_toolset)
cmake_target_name = namer.CreateCMakeTargetName(qualified_target)
extra_sources = []
extra_deps = []
# Actions must come first, since they can generate more OBJs for use below.
if 'actions' in spec:
WriteActions(cmake_target_name, spec['actions'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Rules must be early like actions.
if 'rules' in spec:
WriteRules(cmake_target_name, spec['rules'], extra_sources, extra_deps,
path_from_cmakelists_to_gyp, output)
# Copies
if 'copies' in spec:
WriteCopies(cmake_target_name, spec['copies'], extra_deps,
path_from_cmakelists_to_gyp, output)
# Target and sources
srcs = spec.get('sources', [])
# Gyp separates the sheep from the goats based on file extensions.
# A full separation is done here because of flag handing (see below).
s_sources = []
c_sources = []
cxx_sources = []
linkable_sources = []
other_sources = []
for src in srcs:
_, ext = os.path.splitext(src)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
src_norm_path = NormjoinPath(path_from_cmakelists_to_gyp, src);
if src_type == 's':
s_sources.append(src_norm_path)
elif src_type == 'cc':
c_sources.append(src_norm_path)
elif src_type == 'cxx':
cxx_sources.append(src_norm_path)
elif Linkable(ext):
linkable_sources.append(src_norm_path)
else:
other_sources.append(src_norm_path)
for extra_source in extra_sources:
src, real_source = extra_source
_, ext = os.path.splitext(real_source)
src_type = COMPILABLE_EXTENSIONS.get(ext, None)
if src_type == 's':
s_sources.append(src)
elif src_type == 'cc':
c_sources.append(src)
elif src_type == 'cxx':
cxx_sources.append(src)
elif Linkable(ext):
linkable_sources.append(src)
else:
other_sources.append(src)
s_sources_name = None
if s_sources:
s_sources_name = cmake_target_name + '__asm_srcs'
SetVariableList(output, s_sources_name, s_sources)
c_sources_name = None
if c_sources:
c_sources_name = cmake_target_name + '__c_srcs'
SetVariableList(output, c_sources_name, c_sources)
cxx_sources_name = None
if cxx_sources:
cxx_sources_name = cmake_target_name + '__cxx_srcs'
SetVariableList(output, cxx_sources_name, cxx_sources)
linkable_sources_name = None
if linkable_sources:
linkable_sources_name = cmake_target_name + '__linkable_srcs'
SetVariableList(output, linkable_sources_name, linkable_sources)
other_sources_name = None
if other_sources:
other_sources_name = cmake_target_name + '__other_srcs'
SetVariableList(output, other_sources_name, other_sources)
# CMake gets upset when executable targets provide no sources.
# http://www.cmake.org/pipermail/cmake/2010-July/038461.html
dummy_sources_name = None
has_sources = (s_sources_name or
c_sources_name or
cxx_sources_name or
linkable_sources_name or
other_sources_name)
if target_type == 'executable' and not has_sources:
dummy_sources_name = cmake_target_name + '__dummy_srcs'
SetVariable(output, dummy_sources_name,
"${obj}.${TOOLSET}/${TARGET}/genc/dummy.c")
output.write('if(NOT EXISTS "')
WriteVariable(output, dummy_sources_name)
output.write('")\n')
output.write(' file(WRITE "')
WriteVariable(output, dummy_sources_name)
output.write('" "")\n')
output.write("endif()\n")
# CMake is opposed to setting linker directories and considers the practice
# of setting linker directories dangerous. Instead, it favors the use of
# find_library and passing absolute paths to target_link_libraries.
# However, CMake does provide the command link_directories, which adds
# link directories to targets defined after it is called.
# As a result, link_directories must come before the target definition.
# CMake unfortunately has no means of removing entries from LINK_DIRECTORIES.
library_dirs = config.get('library_dirs')
if library_dirs is not None:
output.write('link_directories(')
for library_dir in library_dirs:
output.write(' ')
output.write(NormjoinPath(path_from_cmakelists_to_gyp, library_dir))
output.write('\n')
output.write(')\n')
output.write(cmake_target_type.command)
output.write('(')
output.write(cmake_target_name)
if cmake_target_type.modifier is not None:
output.write(' ')
output.write(cmake_target_type.modifier)
if s_sources_name:
WriteVariable(output, s_sources_name, ' ')
if c_sources_name:
WriteVariable(output, c_sources_name, ' ')
if cxx_sources_name:
WriteVariable(output, cxx_sources_name, ' ')
if linkable_sources_name:
WriteVariable(output, linkable_sources_name, ' ')
if other_sources_name:
WriteVariable(output, other_sources_name, ' ')
if dummy_sources_name:
WriteVariable(output, dummy_sources_name, ' ')
output.write(')\n')
# Let CMake know if the 'all' target should depend on this target.
exclude_from_all = ('TRUE' if qualified_target not in all_qualified_targets
else 'FALSE')
SetTargetProperty(output, cmake_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
for extra_target_name in extra_deps:
SetTargetProperty(output, extra_target_name,
'EXCLUDE_FROM_ALL', exclude_from_all)
# Output name and location.
if target_type != 'none':
# Link as 'C' if there are no other files
if not c_sources and not cxx_sources:
SetTargetProperty(output, cmake_target_name, 'LINKER_LANGUAGE', ['C'])
# Mark uncompiled sources as uncompiled.
if other_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, other_sources_name, '')
output.write(' PROPERTIES HEADER_FILE_ONLY "TRUE")\n')
# Mark object sources as linkable.
if linkable_sources_name:
output.write('set_source_files_properties(')
WriteVariable(output, linkable_sources_name, '')
output.write(' PROPERTIES EXTERNAL_OBJECT "TRUE")\n')
# Output directory
target_output_directory = spec.get('product_dir')
if target_output_directory is None:
if target_type in ('executable', 'loadable_module'):
target_output_directory = generator_default_variables['PRODUCT_DIR']
elif target_type == 'shared_library':
target_output_directory = '${builddir}/lib.${TOOLSET}'
elif spec.get('standalone_static_library', False):
target_output_directory = generator_default_variables['PRODUCT_DIR']
else:
base_path = gyp.common.RelativePath(os.path.dirname(gyp_file),
options.toplevel_dir)
target_output_directory = '${obj}.${TOOLSET}'
target_output_directory = (
os.path.join(target_output_directory, base_path))
cmake_target_output_directory = NormjoinPathForceCMakeSource(
path_from_cmakelists_to_gyp,
target_output_directory)
SetTargetProperty(output,
cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_DIRECTORY',
cmake_target_output_directory)
# Output name
default_product_prefix = ''
default_product_name = target_name
default_product_ext = ''
if target_type == 'static_library':
static_library_prefix = generator_default_variables['STATIC_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
static_library_prefix)
default_product_prefix = static_library_prefix
default_product_ext = generator_default_variables['STATIC_LIB_SUFFIX']
elif target_type in ('loadable_module', 'shared_library'):
shared_library_prefix = generator_default_variables['SHARED_LIB_PREFIX']
default_product_name = RemovePrefix(default_product_name,
shared_library_prefix)
default_product_prefix = shared_library_prefix
default_product_ext = generator_default_variables['SHARED_LIB_SUFFIX']
elif target_type != 'executable':
print ('ERROR: What output file should be generated?',
'type', target_type, 'target', target_name)
product_prefix = spec.get('product_prefix', default_product_prefix)
product_name = spec.get('product_name', default_product_name)
product_ext = spec.get('product_extension')
if product_ext:
product_ext = '.' + product_ext
else:
product_ext = default_product_ext
SetTargetProperty(output, cmake_target_name, 'PREFIX', product_prefix)
SetTargetProperty(output, cmake_target_name,
cmake_target_type.property_modifier + '_OUTPUT_NAME',
product_name)
SetTargetProperty(output, cmake_target_name, 'SUFFIX', product_ext)
# Make the output of this target referenceable as a source.
cmake_target_output_basename = product_prefix + product_name + product_ext
cmake_target_output = os.path.join(cmake_target_output_directory,
cmake_target_output_basename)
SetFileProperty(output, cmake_target_output, 'GENERATED', ['TRUE'], '')
# Includes
includes = config.get('include_dirs')
if includes:
# This (target include directories) is what requires CMake 2.8.8
includes_name = cmake_target_name + '__include_dirs'
SetVariableList(output, includes_name,
[NormjoinPathForceCMakeSource(path_from_cmakelists_to_gyp, include)
for include in includes])
output.write('set_property(TARGET ')
output.write(cmake_target_name)
output.write(' APPEND PROPERTY INCLUDE_DIRECTORIES ')
WriteVariable(output, includes_name, '')
output.write(')\n')
# Defines
defines = config.get('defines')
if defines is not None:
SetTargetProperty(output,
cmake_target_name,
'COMPILE_DEFINITIONS',
defines,
';')
# Compile Flags - http://www.cmake.org/Bug/view.php?id=6493
# CMake currently does not have target C and CXX flags.
# So, instead of doing...
# cflags_c = config.get('cflags_c')
# if cflags_c is not None:
# SetTargetProperty(output, cmake_target_name,
# 'C_COMPILE_FLAGS', cflags_c, ' ')
# cflags_cc = config.get('cflags_cc')
# if cflags_cc is not None:
# SetTargetProperty(output, cmake_target_name,
# 'CXX_COMPILE_FLAGS', cflags_cc, ' ')
# Instead we must...
cflags = config.get('cflags', [])
cflags_c = config.get('cflags_c', [])
cflags_cxx = config.get('cflags_cc', [])
if (not cflags_c or not c_sources) and (not cflags_cxx or not cxx_sources):
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', cflags, ' ')
elif c_sources and not (s_sources or cxx_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
elif cxx_sources and not (s_sources or c_sources):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetTargetProperty(output, cmake_target_name, 'COMPILE_FLAGS', flags, ' ')
else:
# TODO: This is broken, one cannot generally set properties on files,
# as other targets may require different properties on the same files.
if s_sources and cflags:
SetFilesProperty(output, s_sources_name, 'COMPILE_FLAGS', cflags, ' ')
if c_sources and (cflags or cflags_c):
flags = []
flags.extend(cflags)
flags.extend(cflags_c)
SetFilesProperty(output, c_sources_name, 'COMPILE_FLAGS', flags, ' ')
if cxx_sources and (cflags or cflags_cxx):
flags = []
flags.extend(cflags)
flags.extend(cflags_cxx)
SetFilesProperty(output, cxx_sources_name, 'COMPILE_FLAGS', flags, ' ')
# Linker flags
ldflags = config.get('ldflags')
if ldflags is not None:
SetTargetProperty(output, cmake_target_name, 'LINK_FLAGS', ldflags, ' ')
# Note on Dependencies and Libraries:
# CMake wants to handle link order, resolving the link line up front.
# Gyp does not retain or enforce specifying enough information to do so.
# So do as other gyp generators and use --start-group and --end-group.
# Give CMake as little information as possible so that it doesn't mess it up.
# Dependencies
rawDeps = spec.get('dependencies', [])
static_deps = []
shared_deps = []
other_deps = []
for rawDep in rawDeps:
dep_cmake_name = namer.CreateCMakeTargetName(rawDep)
dep_spec = target_dicts.get(rawDep, {})
dep_target_type = dep_spec.get('type', None)
if dep_target_type == 'static_library':
static_deps.append(dep_cmake_name)
elif dep_target_type == 'shared_library':
shared_deps.append(dep_cmake_name)
else:
other_deps.append(dep_cmake_name)
# ensure all external dependencies are complete before internal dependencies
# extra_deps currently only depend on their own deps, so otherwise run early
if static_deps or shared_deps or other_deps:
for extra_dep in extra_deps:
output.write('add_dependencies(')
output.write(extra_dep)
output.write('\n')
for deps in (static_deps, shared_deps, other_deps):
for dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(dep)
output.write('\n')
output.write(')\n')
linkable = target_type in ('executable', 'loadable_module', 'shared_library')
other_deps.extend(extra_deps)
if other_deps or (not linkable and (static_deps or shared_deps)):
output.write('add_dependencies(')
output.write(cmake_target_name)
output.write('\n')
for dep in gyp.common.uniquer(other_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if not linkable:
for deps in (static_deps, shared_deps):
for lib_dep in gyp.common.uniquer(deps):
output.write(' ')
output.write(lib_dep)
output.write('\n')
output.write(')\n')
# Libraries
if linkable:
external_libs = [lib for lib in spec.get('libraries', []) if len(lib) > 0]
if external_libs or static_deps or shared_deps:
output.write('target_link_libraries(')
output.write(cmake_target_name)
output.write('\n')
if static_deps:
write_group = circular_libs and len(static_deps) > 1
if write_group:
output.write('-Wl,--start-group\n')
for dep in gyp.common.uniquer(static_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if write_group:
output.write('-Wl,--end-group\n')
if shared_deps:
for dep in gyp.common.uniquer(shared_deps):
output.write(' ')
output.write(dep)
output.write('\n')
if external_libs:
for lib in gyp.common.uniquer(external_libs):
output.write(' ')
output.write(lib)
output.write('\n')
output.write(')\n')
UnsetVariable(output, 'TOOLSET')
UnsetVariable(output, 'TARGET')
def GenerateOutputForConfig(target_list, target_dicts, data,
params, config_to_use):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
# Each Gyp configuration creates a different CMakeLists.txt file
# to avoid incompatibilities between Gyp and CMake configurations.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_to_use))
toplevel_build = os.path.join(options.toplevel_dir, build_dir)
output_file = os.path.join(toplevel_build, 'CMakeLists.txt')
gyp.common.EnsureDirExists(output_file)
output = open(output_file, 'w')
output.write('cmake_minimum_required(VERSION 2.8.8 FATAL_ERROR)\n')
output.write('cmake_policy(VERSION 2.8.8)\n')
gyp_file, project_target, _ = gyp.common.ParseQualifiedTarget(target_list[-1])
output.write('project(')
output.write(project_target)
output.write(')\n')
SetVariable(output, 'configuration', config_to_use)
ar = None
cc = None
cxx = None
make_global_settings = data[gyp_file].get('make_global_settings', [])
build_to_top = gyp.common.InvertRelativePath(build_dir,
options.toplevel_dir)
for key, value in make_global_settings:
if key == 'AR':
ar = os.path.join(build_to_top, value)
if key == 'CC':
cc = os.path.join(build_to_top, value)
if key == 'CXX':
cxx = os.path.join(build_to_top, value)
ar = gyp.common.GetEnvironFallback(['AR_target', 'AR'], ar)
cc = gyp.common.GetEnvironFallback(['CC_target', 'CC'], cc)
cxx = gyp.common.GetEnvironFallback(['CXX_target', 'CXX'], cxx)
if ar:
SetVariable(output, 'CMAKE_AR', ar)
if cc:
SetVariable(output, 'CMAKE_C_COMPILER', cc)
if cxx:
SetVariable(output, 'CMAKE_CXX_COMPILER', cxx)
# The following appears to be as-yet undocumented.
# http://public.kitware.com/Bug/view.php?id=8392
output.write('enable_language(ASM)\n')
# ASM-ATT does not support .S files.
# output.write('enable_language(ASM-ATT)\n')
if cc:
SetVariable(output, 'CMAKE_ASM_COMPILER', cc)
SetVariable(output, 'builddir', '${CMAKE_CURRENT_BINARY_DIR}')
SetVariable(output, 'obj', '${builddir}/obj')
output.write('\n')
# TODO: Undocumented/unsupported (the CMake Java generator depends on it).
# CMake by default names the object resulting from foo.c to be foo.c.o.
# Gyp traditionally names the object resulting from foo.c foo.o.
# This should be irrelevant, but some targets extract .o files from .a
# and depend on the name of the extracted .o files.
output.write('set(CMAKE_C_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('set(CMAKE_CXX_OUTPUT_EXTENSION_REPLACE 1)\n')
output.write('\n')
# Force ninja to use rsp files. Otherwise link and ar lines can get too long,
# resulting in 'Argument list too long' errors.
output.write('set(CMAKE_NINJA_FORCE_RESPONSE_FILE 1)\n')
output.write('\n')
namer = CMakeNamer(target_list)
# The list of targets upon which the 'all' target should depend.
# CMake has its own implicit 'all' target; one is not created explicitly.
all_qualified_targets = set()
for build_file in params['build_files']:
for qualified_target in gyp.common.AllTargets(target_list,
target_dicts,
os.path.normpath(build_file)):
all_qualified_targets.add(qualified_target)
for qualified_target in target_list:
WriteTarget(namer, qualified_target, target_dicts, build_dir, config_to_use,
options, generator_flags, all_qualified_targets, output)
output.close()
def PerformBuild(data, configurations, params):
options = params['options']
generator_flags = params['generator_flags']
# generator_dir: relative path from pwd to where make puts build files.
# Makes migrating from make to cmake easier, cmake doesn't put anything here.
generator_dir = os.path.relpath(options.generator_output or '.')
# output_dir: relative path from generator_dir to the build directory.
output_dir = generator_flags.get('output_dir', 'out')
for config_name in configurations:
# build_dir: relative path from source root to our output files.
# e.g. "out/Debug"
build_dir = os.path.normpath(os.path.join(generator_dir,
output_dir,
config_name))
arguments = ['cmake', '-G', 'Ninja']
print 'Generating [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments, cwd=build_dir)
arguments = ['ninja', '-C', build_dir]
print 'Building [%s]: %s' % (config_name, arguments)
subprocess.check_call(arguments)
def CallGenerateOutputForConfig(arglist):
# Ignore the interrupt signal so that the parent process catches it and
# kills all multiprocessing children.
signal.signal(signal.SIGINT, signal.SIG_IGN)
target_list, target_dicts, data, params, config_name = arglist
GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
def GenerateOutput(target_list, target_dicts, data, params):
user_config = params.get('generator_flags', {}).get('config', None)
if user_config:
GenerateOutputForConfig(target_list, target_dicts, data,
params, user_config)
else:
config_names = target_dicts[target_list[0]]['configurations'].keys()
if params['parallel']:
try:
pool = multiprocessing.Pool(len(config_names))
arglists = []
for config_name in config_names:
arglists.append((target_list, target_dicts, data,
params, config_name))
pool.map(CallGenerateOutputForConfig, arglists)
except KeyboardInterrupt, e:
pool.terminate()
raise e
else:
for config_name in config_names:
GenerateOutputForConfig(target_list, target_dicts, data,
params, config_name)
|
mit
|
flit/pyOCD
|
pyocd/rtos/freertos.py
|
2
|
20706
|
# pyOCD debugger
# Copyright (c) 2016-2020 Arm Limited
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .provider import (TargetThread, ThreadProvider)
from .common import (read_c_string, HandlerModeThread, EXC_RETURN_EXT_FRAME_MASK)
from ..core import exceptions
from ..core.target import Target
from ..core.plugin import Plugin
from ..debug.context import DebugContext
from ..coresight.cortex_m_core_registers import index_for_reg
import logging
FREERTOS_MAX_PRIORITIES = 63
LIST_SIZE = 20
LIST_INDEX_OFFSET = 16
LIST_NODE_NEXT_OFFSET = 8 # 4?
LIST_NODE_OBJECT_OFFSET = 12
THREAD_STACK_POINTER_OFFSET = 0
THREAD_PRIORITY_OFFSET = 44
THREAD_NAME_OFFSET = 52
# Create a logger for this module.
LOG = logging.getLogger(__name__)
class TargetList(object):
def __init__(self, context, ptr):
self._context = context
self._list = ptr
def __iter__(self):
prev = -1
found = 0
count = self._context.read32(self._list)
if count == 0:
return
node = self._context.read32(self._list + LIST_INDEX_OFFSET)
while (node != 0) and (node != prev) and (found < count):
try:
# Read the object from the node.
obj = self._context.read32(node + LIST_NODE_OBJECT_OFFSET)
yield obj
found += 1
# Read next list node pointer.
prev = node
node = self._context.read32(node + LIST_NODE_NEXT_OFFSET)
except exceptions.TransferError:
LOG.warning("TransferError while reading list elements (list=0x%08x, node=0x%08x), terminating list", self._list, node)
node = 0
class FreeRTOSThreadContext(DebugContext):
"""! @brief Thread context for FreeRTOS."""
# SP/PSP are handled specially, so it is not in these dicts.
COMMON_REGISTER_OFFSETS = {
4: 0, # r4
5: 4, # r5
6: 8, # r6
7: 12, # r7
8: 16, # r8
9: 20, # r9
10: 24, # r10
11: 28, # r11
}
NOFPU_REGISTER_OFFSETS = {
0: 32, # r0
1: 36, # r1
2: 40, # r2
3: 44, # r3
12: 48, # r12
14: 52, # lr
15: 56, # pc
16: 60, # xpsr
}
NOFPU_REGISTER_OFFSETS.update(COMMON_REGISTER_OFFSETS)
FPU_BASIC_REGISTER_OFFSETS = {
-1: 32, # exception LR
0: 36, # r0
1: 40, # r1
2: 44, # r2
3: 48, # r3
12: 52, # r12
14: 56, # lr
15: 60, # pc
16: 64, # xpsr
}
FPU_BASIC_REGISTER_OFFSETS.update(COMMON_REGISTER_OFFSETS)
FPU_EXTENDED_REGISTER_OFFSETS = {
-1: 32, # exception LR
0x50: 36, # s16
0x51: 40, # s17
0x52: 44, # s18
0x53: 48, # s19
0x54: 52, # s20
0x55: 56, # s21
0x56: 60, # s22
0x57: 64, # s23
0x58: 68, # s24
0x59: 72, # s25
0x5a: 76, # s26
0x5b: 80, # s27
0x5c: 84, # s28
0x5d: 88, # s29
0x5e: 92, # s30
0x5f: 96, # s31
0: 100, # r0
1: 104, # r1
2: 108, # r2
3: 112, # r3
12: 116, # r12
14: 120, # lr
15: 124, # pc
16: 128, # xpsr
0x40: 132, # s0
0x41: 136, # s1
0x42: 140, # s2
0x43: 144, # s3
0x44: 148, # s4
0x45: 152, # s5
0x46: 156, # s6
0x47: 160, # s7
0x48: 164, # s8
0x49: 168, # s9
0x4a: 172, # s10
0x4b: 176, # s11
0x4c: 180, # s12
0x4d: 184, # s13
0x4e: 188, # s14
0x4f: 192, # s15
33: 196, # fpscr
# (reserved word: 200)
}
FPU_EXTENDED_REGISTER_OFFSETS.update(COMMON_REGISTER_OFFSETS)
def __init__(self, parent, thread):
super(FreeRTOSThreadContext, self).__init__(parent)
self._thread = thread
self._has_fpu = self.core.has_fpu
def read_core_registers_raw(self, reg_list):
reg_list = [index_for_reg(reg) for reg in reg_list]
reg_vals = []
isCurrent = self._thread.is_current
inException = isCurrent and self._parent.read_core_register('ipsr') > 0
# If this is the current thread and we're not in an exception, just read the live registers.
if isCurrent and not inException:
return self._parent.read_core_registers_raw(reg_list)
# Because of above tests, from now on, inException implies isCurrent;
# we are generating the thread view for the RTOS thread where the
# exception occurred; the actual Handler Mode thread view is produced
# by HandlerModeThread
if inException:
# Reasonable to assume PSP is still valid
sp = self._parent.read_core_register('psp')
else:
sp = self._thread.get_stack_pointer()
# Determine which register offset table to use and the offsets past the saved state.
hwStacked = 0x20
swStacked = 0x20
table = self.NOFPU_REGISTER_OFFSETS
if self._has_fpu:
try:
if inException and self.core.is_vector_catch():
# Vector catch has just occurred, take live LR
exceptionLR = self._parent.read_core_register('lr')
else:
# Read stacked exception return LR.
offset = self.FPU_BASIC_REGISTER_OFFSETS[-1]
exceptionLR = self._parent.read32(sp + offset)
# Check bit 4 of the saved exception LR to determine if FPU registers were stacked.
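# Illustrative values (not from the original source): an EXC_RETURN of 0xFFFFFFFD has
# bit 4 set, meaning a basic frame was pushed and no FPU registers are on the stack,
# while 0xFFFFFFED has bit 4 clear, meaning an extended frame (including S0-S15 and
# FPSCR) was pushed by hardware.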
if (exceptionLR & EXC_RETURN_EXT_FRAME_MASK) != 0:
table = self.FPU_BASIC_REGISTER_OFFSETS
swStacked = 0x24
else:
table = self.FPU_EXTENDED_REGISTER_OFFSETS
hwStacked = 0x68
swStacked = 0x64
except exceptions.TransferError:
LOG.debug("Transfer error while reading thread's saved LR")
for reg in reg_list:
# Must handle stack pointer specially.
if reg == 13:
if inException:
reg_vals.append(sp + hwStacked)
else:
reg_vals.append(sp + swStacked + hwStacked)
continue
# Look up offset for this register on the stack.
spOffset = table.get(reg, None)
if spOffset is None:
reg_vals.append(self._parent.read_core_register_raw(reg))
continue
if inException:
spOffset -= swStacked
try:
if spOffset >= 0:
reg_vals.append(self._parent.read32(sp + spOffset))
else:
# Not available - try live one
reg_vals.append(self._parent.read_core_register_raw(reg))
except exceptions.TransferError:
reg_vals.append(0)
return reg_vals
class FreeRTOSThread(TargetThread):
"""! @brief A FreeRTOS task."""
RUNNING = 1
READY = 2
BLOCKED = 3
SUSPENDED = 4
DELETED = 5
STATE_NAMES = {
RUNNING : "Running",
READY : "Ready",
BLOCKED : "Blocked",
SUSPENDED : "Suspended",
DELETED : "Deleted",
}
def __init__(self, targetContext, provider, base):
super(FreeRTOSThread, self).__init__()
self._target_context = targetContext
self._provider = provider
self._base = base
self._state = FreeRTOSThread.READY
self._thread_context = FreeRTOSThreadContext(self._target_context, self)
self._priority = self._target_context.read32(self._base + THREAD_PRIORITY_OFFSET)
self._name = read_c_string(self._target_context, self._base + THREAD_NAME_OFFSET)
if len(self._name) == 0:
self._name = "Unnamed"
def get_stack_pointer(self):
# Get stack pointer saved in thread struct.
try:
return self._target_context.read32(self._base + THREAD_STACK_POINTER_OFFSET)
except exceptions.TransferError:
LOG.debug("Transfer error while reading thread's stack pointer @ 0x%08x", self._base + THREAD_STACK_POINTER_OFFSET)
return 0
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
@property
def priority(self):
return self._priority
@property
def unique_id(self):
return self._base
@property
def name(self):
return self._name
@property
def description(self):
return "%s; Priority %d" % (self.STATE_NAMES[self.state], self.priority)
@property
def is_current(self):
return self._provider.get_actual_current_thread_id() == self.unique_id
@property
def context(self):
return self._thread_context
def __str__(self):
return "<FreeRTOSThread@0x%08x id=%x name=%s>" % (id(self), self.unique_id, self.name)
def __repr__(self):
return str(self)
class FreeRTOSThreadProvider(ThreadProvider):
"""! @brief Thread provider for FreeRTOS."""
## Required FreeRTOS symbols.
FREERTOS_SYMBOLS = [
"uxCurrentNumberOfTasks",
"pxCurrentTCB",
"pxReadyTasksLists",
"xDelayedTaskList1",
"xDelayedTaskList2",
"xPendingReadyList",
"uxTopReadyPriority",
"xSchedulerRunning",
]
def __init__(self, target):
super(FreeRTOSThreadProvider, self).__init__(target)
self._symbols = None
self._total_priorities = 0
self._threads = {}
def init(self, symbolProvider):
# Lookup required symbols.
self._symbols = self._lookup_symbols(self.FREERTOS_SYMBOLS, symbolProvider)
if self._symbols is None:
return False
# Look up optional xSuspendedTaskList, controlled by INCLUDE_vTaskSuspend
suspendedTaskListSym = self._lookup_symbols(["xSuspendedTaskList"], symbolProvider)
if suspendedTaskListSym is not None:
self._symbols['xSuspendedTaskList'] = suspendedTaskListSym['xSuspendedTaskList']
# Look up optional xTasksWaitingTermination, controlled by INCLUDE_vTaskDelete
tasksWaitingTerminationSym = self._lookup_symbols(["xTasksWaitingTermination"], symbolProvider)
if tasksWaitingTerminationSym is not None:
self._symbols['xTasksWaitingTermination'] = tasksWaitingTerminationSym['xTasksWaitingTermination']
# Look up vPortEnableVFP() to determine if the FreeRTOS port supports the FPU.
vPortEnableVFP = self._lookup_symbols(["vPortEnableVFP"], symbolProvider)
self._fpu_port = vPortEnableVFP is not None
elfOptHelp = " Try using the --elf option." if self._target.elf is None else ""
# Check for the expected list size. These two symbols are each a single list and xDelayedTaskList2
# immediately follows xDelayedTaskList1, so we can just subtract their addresses to get the
# size of a single list.
delta = self._symbols['xDelayedTaskList2'] - self._symbols['xDelayedTaskList1']
delta = self._get_elf_symbol_size('xDelayedTaskList1', self._symbols['xDelayedTaskList1'], delta)
if delta != LIST_SIZE:
LOG.warning("FreeRTOS: list size is unexpected, maybe an unsupported configuration of FreeRTOS." + elfOptHelp)
return False
# xDelayedTaskList1 immediately follows pxReadyTasksLists, so subtracting their addresses gives
# us the total size of the pxReadyTasksLists array. This alone is not trustworthy, though, since
# the compiler/linker can rearrange symbols, so the size recorded in the ELF is preferred when available.
delta = self._symbols['xDelayedTaskList1'] - self._symbols['pxReadyTasksLists']
delta = self._get_elf_symbol_size('pxReadyTasksLists', self._symbols['pxReadyTasksLists'], delta)
if delta % LIST_SIZE:
LOG.warning("FreeRTOS: pxReadyTasksLists size is unexpected, maybe an unsupported version of FreeRTOS." + elfOptHelp)
return False
self._total_priorities = delta // LIST_SIZE
if self._total_priorities > FREERTOS_MAX_PRIORITIES:
LOG.warning("FreeRTOS: number of priorities is too large (%d)." + elfOptHelp, self._total_priorities)
return False
LOG.debug("FreeRTOS: number of priorities is %d", self._total_priorities)
self._target.session.subscribe(self.event_handler, Target.Event.POST_FLASH_PROGRAM)
self._target.session.subscribe(self.event_handler, Target.Event.POST_RESET)
return True
def invalidate(self):
self._threads = {}
def event_handler(self, notification):
# Invalidate threads list if flash is reprogrammed or the target is reset.
LOG.debug("FreeRTOS: invalidating threads list: %s" % (repr(notification)))
self.invalidate()
def _build_thread_list(self):
newThreads = {}
# Read the number of threads.
threadCount = self._target_context.read32(self._symbols['uxCurrentNumberOfTasks'])
# Read the current thread.
currentThread = self._target_context.read32(self._symbols['pxCurrentTCB'])
# We should only be building the thread list if the scheduler is running, so a zero thread
# count or a null current thread means something is bizarrely wrong.
if threadCount == 0 or currentThread == 0:
LOG.warning("FreeRTOS: no threads even though the scheduler is running")
return
# Read the top ready priority.
topPriority = self._target_context.read32(self._symbols['uxTopReadyPriority'])
# Handle an uxTopReadyPriority value larger than the number of lists. This is most likely
# caused by the configUSE_PORT_OPTIMISED_TASK_SELECTION option being enabled, which treats
# uxTopReadyPriority as a bitmap instead of integer. This is ok because uxTopReadyPriority
# in optimised mode will always be >= the actual top priority.
if topPriority >= self._total_priorities:
topPriority = self._total_priorities - 1
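# Worked example (illustrative): with configUSE_PORT_OPTIMISED_TASK_SELECTION enabled
# and tasks ready at priorities 1 and 3, uxTopReadyPriority holds the bitmap
# 0b1010 (= 10), which is >= the real top priority of 3, so clamping it to the number
# of priority lists is safe.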
# Build up list of all the thread lists we need to scan.
listsToRead = []
for i in range(topPriority + 1):
listsToRead.append((self._symbols['pxReadyTasksLists'] + i * LIST_SIZE, FreeRTOSThread.READY))
listsToRead.append((self._symbols['xDelayedTaskList1'], FreeRTOSThread.BLOCKED))
listsToRead.append((self._symbols['xDelayedTaskList2'], FreeRTOSThread.BLOCKED))
listsToRead.append((self._symbols['xPendingReadyList'], FreeRTOSThread.READY))
if 'xSuspendedTaskList' in self._symbols:
listsToRead.append((self._symbols['xSuspendedTaskList'], FreeRTOSThread.SUSPENDED))
if 'xTasksWaitingTermination' in self._symbols:
listsToRead.append((self._symbols['xTasksWaitingTermination'], FreeRTOSThread.DELETED))
for listPtr, state in listsToRead:
for threadBase in TargetList(self._target_context, listPtr):
try:
# Don't try adding more threads than the number of threads that FreeRTOS says there are.
if len(newThreads) >= threadCount:
break
# Reuse existing thread objects.
if threadBase in self._threads:
t = self._threads[threadBase]
else:
t = FreeRTOSThread(self._target_context, self, threadBase)
# Set thread state.
if threadBase == currentThread:
t.state = FreeRTOSThread.RUNNING
else:
t.state = state
LOG.debug("Thread 0x%08x (%s)", threadBase, t.name)
newThreads[t.unique_id] = t
except exceptions.TransferError:
LOG.debug("TransferError while examining thread 0x%08x", threadBase)
if len(newThreads) != threadCount:
LOG.warning("FreeRTOS: thread count mismatch")
# Create fake handler mode thread.
if self._target_context.read_core_register('ipsr') > 0:
LOG.debug("FreeRTOS: creating handler mode thread")
t = HandlerModeThread(self._target_context, self)
newThreads[t.unique_id] = t
self._threads = newThreads
def get_threads(self):
if not self.is_enabled:
return []
self.update_threads()
return list(self._threads.values())
def get_thread(self, threadId):
if not self.is_enabled:
return None
self.update_threads()
return self._threads.get(threadId, None)
@property
def is_enabled(self):
return self._symbols is not None and self.get_is_running()
@property
def current_thread(self):
if not self.is_enabled:
return None
self.update_threads()
id = self.get_current_thread_id()
try:
return self._threads[id]
except KeyError:
return None
def is_valid_thread_id(self, threadId):
if not self.is_enabled:
return False
self.update_threads()
return threadId in self._threads
def get_current_thread_id(self):
if not self.is_enabled:
return None
if self._target_context.read_core_register('ipsr') > 0:
return HandlerModeThread.UNIQUE_ID
return self.get_actual_current_thread_id()
def get_actual_current_thread_id(self):
if not self.is_enabled:
return None
return self._target_context.read32(self._symbols['pxCurrentTCB'])
def get_is_running(self):
if self._symbols is None:
return False
return self._target_context.read32(self._symbols['xSchedulerRunning']) != 0
def _get_elf_symbol_size(self, name, addr, calculated_size):
if self._target.elf is not None:
symInfo = None
try:
symInfo = self._target.elf.symbol_decoder.get_symbol_for_name(name)
except RuntimeError as e:
LOG.error("FreeRTOS elf symbol query failed for (%s) with an exception. " + str(e),
name, exc_info=self._target.session.log_tracebacks)
# Simple checks to make sure gdb is looking at the same executable we are
if symInfo is None:
LOG.debug("FreeRTOS symbol '%s' not found in elf file", name)
elif symInfo.address != addr:
LOG.debug("FreeRTOS symbol '%s' address mismatch elf=0x%08x, gdb=0x%08x", name, symInfo.address, addr)
else:
if calculated_size != symInfo.size:
LOG.info("FreeRTOS symbol '%s' size from elf (%ld) != calculated size (%ld). Using elf value.",
name, symInfo.size, calculated_size)
else:
LOG.debug("FreeRTOS symbol '%s' size (%ld) from elf file matches calculated value", name, calculated_size)
return symInfo.size
return calculated_size
class FreeRTOSPlugin(Plugin):
"""! @brief Plugin class for FreeRTOS."""
def load(self):
return FreeRTOSThreadProvider
@property
def name(self):
return "freertos"
@property
def description(self):
return "FreeRTOS"
|
apache-2.0
|
spaceone/mils-secure
|
app/console/app/pygments/styles/manni.py
|
24
|
2379
|
# -*- coding: utf-8 -*-
"""
pygments.styles.manni
~~~~~~~~~~~~~~~~~~~~~
A colorful style, inspired by the terminal highlighting style.
This is a port of the style used in the `php port`_ of pygments
by Manni. The style is called 'default' there.
:copyright: 2006-2007 by Armin Ronacher, Manni <[email protected]>.
:license: BSD, see LICENSE for more details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class ManniStyle(Style):
"""
A colorful style, inspired by the terminal highlighting style.
"""
background_color = '#f0f3f3'
styles = {
Whitespace: '#bbbbbb',
Comment: 'italic #0099FF',
Comment.Preproc: 'noitalic #009999',
Comment.Special: 'bold',
Keyword: 'bold #006699',
Keyword.Pseudo: 'nobold',
Keyword.Type: '#007788',
Operator: '#555555',
Operator.Word: 'bold #000000',
Name.Builtin: '#336666',
Name.Function: '#CC00FF',
Name.Class: 'bold #00AA88',
Name.Namespace: 'bold #00CCFF',
Name.Exception: 'bold #CC0000',
Name.Variable: '#003333',
Name.Constant: '#336600',
Name.Label: '#9999FF',
Name.Entity: 'bold #999999',
Name.Attribute: '#330099',
Name.Tag: 'bold #330099',
Name.Decorator: '#9999FF',
String: '#CC3300',
String.Doc: 'italic',
String.Interpol: '#AA0000',
String.Escape: 'bold #CC3300',
String.Regex: '#33AAAA',
String.Symbol: '#FFCC33',
String.Other: '#CC3300',
Number: '#FF6600',
Generic.Heading: 'bold #003300',
Generic.Subheading: 'bold #003300',
Generic.Deleted: 'border:#CC0000 bg:#FFCCCC',
Generic.Inserted: 'border:#00CC00 bg:#CCFFCC',
Generic.Error: '#FF0000',
Generic.Emph: 'italic',
Generic.Strong: 'bold',
Generic.Prompt: 'bold #000099',
Generic.Output: '#AAAAAA',
Generic.Traceback: '#99CC66',
Error: 'bg:#FFAAAA #AA0000'
}
|
mit
|
elmer/smart-gitosis
|
gitosis/gitdaemon.py
|
24
|
2451
|
import errno
import logging
import os
from ConfigParser import NoSectionError, NoOptionError
log = logging.getLogger('gitosis.gitdaemon')
from gitosis import util
def export_ok_path(repopath):
p = os.path.join(repopath, 'git-daemon-export-ok')
return p
def allow_export(repopath):
p = export_ok_path(repopath)
file(p, 'a').close()
def deny_export(repopath):
p = export_ok_path(repopath)
try:
os.unlink(p)
except OSError, e:
if e.errno == errno.ENOENT:
pass
else:
raise
def _extract_reldir(topdir, dirpath):
if topdir == dirpath:
return '.'
prefix = topdir + '/'
assert dirpath.startswith(prefix)
reldir = dirpath[len(prefix):]
return reldir
def set_export_ok(config):
repositories = util.getRepositoryDir(config)
try:
global_enable = config.getboolean('gitosis', 'daemon')
except (NoSectionError, NoOptionError):
global_enable = False
log.debug(
'Global default is %r',
{True: 'allow', False: 'deny'}.get(global_enable),
)
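# Hypothetical gitosis.conf sketch showing the options read here and below (section and
# option names match the getboolean() calls; the repository name is made up):
#
#   [gitosis]
#   daemon = yes            # global default: create git-daemon-export-ok files
#
#   [repo private-project]
#   daemon = no             # per-repository override: deny export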
def _error(e):
if e.errno == errno.ENOENT:
pass
else:
raise e
for (dirpath, dirnames, filenames) \
in os.walk(repositories, onerror=_error):
# oh how many times i have wished for os.walk to report
# topdir and reldir separately, instead of dirpath
reldir = _extract_reldir(
topdir=repositories,
dirpath=dirpath,
)
log.debug('Walking %r, seeing %r', reldir, dirnames)
to_recurse = []
repos = []
for dirname in dirnames:
if dirname.endswith('.git'):
repos.append(dirname)
else:
to_recurse.append(dirname)
dirnames[:] = to_recurse
for repo in repos:
name, ext = os.path.splitext(repo)
if reldir != '.':
name = os.path.join(reldir, name)
assert ext == '.git'
try:
enable = config.getboolean('repo %s' % name, 'daemon')
except (NoSectionError, NoOptionError):
enable = global_enable
if enable:
log.debug('Allow %r', name)
allow_export(os.path.join(dirpath, repo))
else:
log.debug('Deny %r', name)
deny_export(os.path.join(dirpath, repo))
|
gpl-2.0
|
RuralHunter/showtime
|
ext/libyuv/tools/valgrind-libyuv/libyuv_tests.py
|
13
|
5222
|
#!/usr/bin/env python
# Copyright (c) 2012 The LibYuv Project Authors. All rights reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
"""Runs various libyuv tests through valgrind_test.py.
This script inherits from chrome_tests.py in Chrome, but allows running any test
instead of only the hard-coded ones. It uses the -t cmdline flag to do this, and
only supports specifying a single test for each run.
Suppression files:
The Chrome valgrind directory we use as a DEPS dependency contains the following
suppression files:
valgrind/memcheck/suppressions.txt
valgrind/memcheck/suppressions_mac.txt
valgrind/tsan/suppressions.txt
valgrind/tsan/suppressions_mac.txt
valgrind/tsan/suppressions_win32.txt
Since they're referenced from the chrome_tests.py script, we have similar files
below the directory of this script. When executing, this script will setup both
Chrome's suppression files and our own, so we can easily maintain libyuv
specific suppressions in our own files.
"""
import logging
import optparse
import os
import sys
import logging_utils
import path_utils
import chrome_tests
class LibyuvTest(chrome_tests.ChromeTests):
"""Class that handles setup of suppressions for libyuv.
Everything else is inherited from chrome_tests.ChromeTests.
"""
def _DefaultCommand(self, tool, exe=None, valgrind_test_args=None):
"""Override command-building method so we can add more suppressions."""
cmd = chrome_tests.ChromeTests._DefaultCommand(self, tool, exe,
valgrind_test_args)
# When ChromeTests._DefaultCommand has executed, it has setup suppression
# files based on what's found in the memcheck/ or tsan/ subdirectories of
# this script's location. If Mac or Windows is executing, additional
# platform specific files have also been added.
# Since only the ones located below this directory are added, we must also
# add the ones maintained by Chrome, located in ../valgrind.
# The idea is to look for --suppression arguments in the cmd list and add a
# modified copy of each suppression file, for the corresponding file in
# ../valgrind. If we would simply replace 'valgrind-libyuv' with 'valgrind'
# we may produce invalid paths if other parts of the path contain that
# string. That's why the code below only replaces the end of the path.
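# For example (illustrative paths only), a token such as
#   --suppressions=/checkout/libyuv/tools/valgrind-libyuv/memcheck/suppressions.txt
# gets a sibling entry
#   --suppressions=/checkout/libyuv/tools/valgrind/memcheck/suppressions.txt
# because only the script_dir portion of the path is replaced.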
script_dir = path_utils.ScriptDir()
old_base, _ = os.path.split(script_dir)
new_dir = os.path.join(old_base, 'valgrind')
add_suppressions = []
for token in cmd:
if '--suppressions' in token:
add_suppressions.append(token.replace(script_dir, new_dir))
return add_suppressions + cmd
def main(_):
parser = optparse.OptionParser('usage: %prog -b <dir> -t <test> <test args>')
parser.disable_interspersed_args()
parser.add_option('-b', '--build_dir',
help=('Location of the compiler output. Can only be used '
'when the test argument does not contain this path.'))
parser.add_option('-t', '--test', help='Test to run.')
parser.add_option('', '--baseline', action='store_true', default=False,
help='Generate baseline data instead of validating')
parser.add_option('', '--gtest_filter',
help='Additional arguments to --gtest_filter')
parser.add_option('', '--gtest_repeat',
help='Argument for --gtest_repeat')
parser.add_option('-v', '--verbose', action='store_true', default=False,
help='Verbose output - enable debug log messages')
parser.add_option('', '--tool', dest='valgrind_tool', default='memcheck',
help='Specify a valgrind tool to run the tests under')
parser.add_option('', '--tool_flags', dest='valgrind_tool_flags', default='',
help='Specify custom flags for the selected valgrind tool')
parser.add_option('', '--keep_logs', action='store_true', default=False,
help=('Store memory tool logs in the <tool>.logs directory '
'instead of /tmp.\nThis can be useful for tool '
'developers/maintainers.\nPlease note that the <tool>'
'.logs directory will be clobbered on tool startup.'))
options, args = parser.parse_args()
if options.verbose:
logging_utils.config_root(logging.DEBUG)
else:
logging_utils.config_root()
if not options.test:
parser.error('--test not specified')
# If --build_dir is provided, prepend it to the test executable if needed.
test_executable = options.test
if options.build_dir and not test_executable.startswith(options.build_dir):
test_executable = os.path.join(options.build_dir, test_executable)
args = [test_executable] + args
test = LibyuvTest(options, args, 'cmdline')
return test.Run()
if __name__ == '__main__':
return_code = main(sys.argv)
sys.exit(return_code)
|
gpl-3.0
|
wimnat/ansible
|
.azure-pipelines/scripts/combine-coverage.py
|
25
|
2174
|
#!/usr/bin/env python
"""
Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import shutil
import sys
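# Illustrative example (artifact names are hypothetical): given downloaded artifacts
#   "Coverage 1 Linux", "Coverage 2 Linux", "Coverage 1 macOS"
# the loop in main() keeps attempt 2 for the "Linux" job and attempt 1 for "macOS",
# so only the most recent attempt of each job contributes coverage files.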
def main():
"""Main program entry point."""
source_directory = sys.argv[1]
if '/ansible_collections/' in os.getcwd():
output_path = "tests/output"
else:
output_path = "test/results"
destination_directory = os.path.join(output_path, 'coverage')
if not os.path.exists(destination_directory):
os.makedirs(destination_directory)
jobs = {}
count = 0
for name in os.listdir(source_directory):
match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
label = match.group('label')
attempt = int(match.group('attempt'))
jobs[label] = max(attempt, jobs.get(label, 0))
for label, attempt in jobs.items():
name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
source = os.path.join(source_directory, name)
source_files = os.listdir(source)
for source_file in source_files:
source_path = os.path.join(source, source_file)
destination_path = os.path.join(destination_directory, source_file + '.' + label)
print('"%s" -> "%s"' % (source_path, destination_path))
shutil.copyfile(source_path, destination_path)
count += 1
print('Coverage file count: %d' % count)
print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
print('##vso[task.setVariable variable=outputPath]%s' % output_path)
if __name__ == '__main__':
main()
|
gpl-3.0
|
hill-a/stable-baselines
|
stable_baselines/acer/acer_simple.py
|
1
|
39128
|
import time
import warnings
import numpy as np
import tensorflow as tf
from gym.spaces import Discrete, Box
from collections import deque
from stable_baselines import logger
from stable_baselines.common.schedules import Scheduler
from stable_baselines.common.tf_util import batch_to_seq, seq_to_batch, \
check_shape, avg_norm, gradient_add, q_explained_variance, total_episode_reward_logger
from stable_baselines.acer.buffer import Buffer
from stable_baselines.common import ActorCriticRLModel, tf_util, SetVerbosity, TensorboardWriter
from stable_baselines.common.runners import AbstractEnvRunner
from stable_baselines.common.policies import ActorCriticPolicy, RecurrentActorCriticPolicy
# For ACER
def get_by_index(input_tensor, idx):
"""
Gather one element per row of a 2D input tensor, selected by the given indices
:param input_tensor: (TensorFlow Tensor) The 2D input tensor
:param idx: (TensorFlow Tensor) 1D tensor of per-row column indices
:return: (TensorFlow Tensor) 1D tensor of the gathered elements
"""
assert len(input_tensor.get_shape()) == 2
assert len(idx.get_shape()) == 1
idx_flattened = tf.range(0, input_tensor.shape[0], dtype=tf.int64) * input_tensor.shape[1] + idx
offset_tensor = tf.gather(tf.reshape(input_tensor, [-1]), # flatten input
idx_flattened) # use flattened indices
return offset_tensor
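# Worked example (illustrative, not part of the original module): for
# input_tensor = [[1., 2.], [3., 4.]] and idx = [1, 0], the flattened indices are
# [0*2 + 1, 1*2 + 0] = [1, 2], so get_by_index returns [2., 3.], i.e. one element
# selected per row.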
def strip(var, n_envs, n_steps, flat=False):
"""
Removes the last step in the batch
:param var: (TensorFlow Tensor) The input Tensor
:param n_envs: (int) The number of environments
:param n_steps: (int) The number of steps to run for each environment
:param flat: (bool) If the input Tensor is flat
:return: (TensorFlow Tensor) the input tensor, without the last step in the batch
"""
out_vars = batch_to_seq(var, n_envs, n_steps + 1, flat)
return seq_to_batch(out_vars[:-1], flat)
def q_retrace(rewards, dones, q_i, values, rho_i, n_envs, n_steps, gamma):
"""
Calculates the target Q-retrace
:param rewards: ([TensorFlow Tensor]) The rewards
:param dones: ([TensorFlow Tensor]) The episode termination (done) flags
:param q_i: ([TensorFlow Tensor]) The Q values for actions taken
:param values: ([TensorFlow Tensor]) The output of the value functions
:param rho_i: ([TensorFlow Tensor]) The importance weight for each action
:param n_envs: (int) The number of environments
:param n_steps: (int) The number of steps to run for each environment
:param gamma: (float) The discount value
:return: ([TensorFlow Tensor]) the target Q-retrace
"""
rho_bar = batch_to_seq(tf.minimum(1.0, rho_i), n_envs, n_steps, True) # list of len steps, shape [n_envs]
reward_seq = batch_to_seq(rewards, n_envs, n_steps, True) # list of len steps, shape [n_envs]
done_seq = batch_to_seq(dones, n_envs, n_steps, True) # list of len steps, shape [n_envs]
q_is = batch_to_seq(q_i, n_envs, n_steps, True)
value_sequence = batch_to_seq(values, n_envs, n_steps + 1, True)
final_value = value_sequence[-1]
qret = final_value
qrets = []
for i in range(n_steps - 1, -1, -1):
check_shape([qret, done_seq[i], reward_seq[i], rho_bar[i], q_is[i], value_sequence[i]], [[n_envs]] * 6)
qret = reward_seq[i] + gamma * qret * (1.0 - done_seq[i])
qrets.append(qret)
qret = (rho_bar[i] * (qret - q_is[i])) + value_sequence[i]
qrets = qrets[::-1]
qret = seq_to_batch(qrets, flat=True)
return qret
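# Sketch of what the loop above computes (a restatement of the code, following the
# Retrace estimator used by ACER, https://arxiv.org/abs/1611.01224):
#   Qret_t = r_t + gamma * (1 - done_t) * (rho_bar_{t+1} * (Qret_{t+1} - Q_{t+1}) + V_{t+1})
# where rho_bar = min(1, rho) is the truncated importance weight and the recursion is
# seeded with Qret_T = V_T at the bootstrap step.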
class EpisodeStats:
def __init__(self, n_steps, n_envs):
"""
Calculates the episode statistics
:param n_steps: (int) The number of steps to run for each environment
:param n_envs: (int) The number of environments
"""
self.episode_rewards = []
for _ in range(n_envs):
self.episode_rewards.append([])
self.len_buffer = deque(maxlen=40) # rolling buffer for episode lengths
self.rewbuffer = deque(maxlen=40) # rolling buffer for episode rewards
self.n_steps = n_steps
self.n_envs = n_envs
def feed(self, rewards, masks):
"""
Update the latest reward and mask
:param rewards: ([float]) The new rewards for the new step
:param masks: ([float]) The new masks for the new step
"""
rewards = np.reshape(rewards, [self.n_envs, self.n_steps])
masks = np.reshape(masks, [self.n_envs, self.n_steps])
for i in range(0, self.n_envs):
for j in range(0, self.n_steps):
self.episode_rewards[i].append(rewards[i][j])
if masks[i][j]:
reward_length = len(self.episode_rewards[i])
reward_sum = sum(self.episode_rewards[i])
self.len_buffer.append(reward_length)
self.rewbuffer.append(reward_sum)
self.episode_rewards[i] = []
def mean_length(self):
"""
Returns the average length of each episode
:return: (float)
"""
if self.len_buffer:
return np.mean(self.len_buffer)
else:
return 0 # on the first params dump, no episodes are finished
def mean_reward(self):
"""
Returns the average reward of each episode
:return: (float)
"""
if self.rewbuffer:
return np.mean(self.rewbuffer)
else:
return 0
class ACER(ActorCriticRLModel):
"""
The ACER (Actor-Critic with Experience Replay) model class, https://arxiv.org/abs/1611.01224
:param policy: (ActorCriticPolicy or str) The policy model to use (MlpPolicy, CnnPolicy, CnnLstmPolicy, ...)
:param env: (Gym environment or str) The environment to learn from (if registered in Gym, can be str)
:param gamma: (float) The discount value
:param n_steps: (int) The number of steps to run for each environment per update
(i.e. batch size is n_steps * n_env where n_env is number of environment copies running in parallel)
:param num_procs: (int) The number of threads for TensorFlow operations
.. deprecated:: 2.9.0
Use `n_cpu_tf_sess` instead.
:param q_coef: (float) The weight for the loss on the Q value
:param ent_coef: (float) The weight for the entropy loss
:param max_grad_norm: (float) The clipping value for the maximum gradient
:param learning_rate: (float) The initial learning rate for the RMS prop optimizer
:param lr_schedule: (str) The type of scheduler for the learning rate update ('linear', 'constant',
'double_linear_con', 'middle_drop' or 'double_middle_drop')
:param rprop_epsilon: (float) RMSProp epsilon (stabilizes square root computation in denominator of RMSProp update)
(default: 1e-5)
:param rprop_alpha: (float) RMSProp decay parameter (default: 0.99)
:param buffer_size: (int) The buffer size in number of steps
:param replay_ratio: (float) The number of replay learning per on policy learning on average,
using a poisson distribution
:param replay_start: (int) The minimum number of steps in the buffer, before learning replay
:param correction_term: (float) Importance weight clipping factor (default: 10)
:param trust_region: (bool) Whether or not the algorithm estimates the gradient of the KL divergence
between the old and updated policy and uses it to determine step size (default: True)
:param alpha: (float) The decay rate for the Exponential moving average of the parameters
:param delta: (float) max KL divergence between the old policy and updated policy (default: 1)
:param verbose: (int) the verbosity level: 0 none, 1 training information, 2 tensorflow debug
:param tensorboard_log: (str) the log location for tensorboard (if None, no logging)
:param _init_setup_model: (bool) Whether or not to build the network at the creation of the instance
:param policy_kwargs: (dict) additional arguments to be passed to the policy on creation
:param full_tensorboard_log: (bool) enable additional logging when using tensorboard
WARNING: this logging can take a lot of space quickly
:param seed: (int) Seed for the pseudo-random generators (python, numpy, tensorflow).
If None (default), use random seed. Note that if you want completely deterministic
results, you must set `n_cpu_tf_sess` to 1.
:param n_cpu_tf_sess: (int) The number of threads for TensorFlow operations
If None, the number of cpu of the current machine will be used.
"""
def __init__(self, policy, env, gamma=0.99, n_steps=20, num_procs=None, q_coef=0.5, ent_coef=0.01, max_grad_norm=10,
learning_rate=7e-4, lr_schedule='linear', rprop_alpha=0.99, rprop_epsilon=1e-5, buffer_size=5000,
replay_ratio=4, replay_start=1000, correction_term=10.0, trust_region=True,
alpha=0.99, delta=1, verbose=0, tensorboard_log=None,
_init_setup_model=True, policy_kwargs=None,
full_tensorboard_log=False, seed=None, n_cpu_tf_sess=1):
if num_procs is not None:
warnings.warn("num_procs will be removed in a future version (v3.x.x) "
"use n_cpu_tf_sess instead", DeprecationWarning)
n_cpu_tf_sess = num_procs
self.n_steps = n_steps
self.replay_ratio = replay_ratio
self.buffer_size = buffer_size
self.replay_start = replay_start
self.gamma = gamma
self.alpha = alpha
self.correction_term = correction_term
self.q_coef = q_coef
self.ent_coef = ent_coef
self.trust_region = trust_region
self.delta = delta
self.max_grad_norm = max_grad_norm
self.rprop_alpha = rprop_alpha
self.rprop_epsilon = rprop_epsilon
self.learning_rate = learning_rate
self.lr_schedule = lr_schedule
self.tensorboard_log = tensorboard_log
self.full_tensorboard_log = full_tensorboard_log
self.action_ph = None
self.done_ph = None
self.reward_ph = None
self.mu_ph = None
self.learning_rate_ph = None
self.polyak_model = None
self.learning_rate_schedule = None
self.run_ops = None
self.names_ops = None
self.train_model = None
self.step_model = None
self.proba_step = None
self.n_act = None
self.n_batch = None
self.summary = None
super(ACER, self).__init__(policy=policy, env=env, verbose=verbose, requires_vec_env=True,
_init_setup_model=_init_setup_model, policy_kwargs=policy_kwargs,
seed=seed, n_cpu_tf_sess=n_cpu_tf_sess)
if _init_setup_model:
self.setup_model()
def _make_runner(self) -> AbstractEnvRunner:
return _Runner(env=self.env, model=self, n_steps=self.n_steps)
def _get_pretrain_placeholders(self):
policy = self.step_model
action_ph = policy.pdtype.sample_placeholder([None])
if isinstance(self.action_space, Discrete):
return policy.obs_ph, action_ph, policy.policy
raise NotImplementedError('Only discrete actions are supported for ACER for now')
def set_env(self, env):
if env is not None:
assert self.n_envs == env.num_envs, \
"Error: the environment passed must have the same number of environments as the model was trained on." \
"This is due to ACER not being capable of changing the number of environments."
super().set_env(env)
def setup_model(self):
with SetVerbosity(self.verbose):
assert issubclass(self.policy, ActorCriticPolicy), "Error: the input policy for the ACER model must be " \
"an instance of common.policies.ActorCriticPolicy."
if isinstance(self.action_space, Discrete):
self.n_act = self.action_space.n
continuous = False
elif isinstance(self.action_space, Box):
# self.n_act = self.action_space.shape[-1]
# continuous = True
raise NotImplementedError("WIP: Acer does not support Continuous actions yet.")
else:
raise ValueError("Error: ACER does not work with {} actions space.".format(self.action_space))
self.n_batch = self.n_envs * self.n_steps
self.graph = tf.Graph()
with self.graph.as_default():
self.sess = tf_util.make_session(num_cpu=self.n_cpu_tf_sess, graph=self.graph)
self.set_random_seed(self.seed)
n_batch_step = None
if issubclass(self.policy, RecurrentActorCriticPolicy):
n_batch_step = self.n_envs
n_batch_train = self.n_envs * (self.n_steps + 1)
step_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs, 1,
n_batch_step, reuse=False, **self.policy_kwargs)
self.params = tf_util.get_trainable_vars("model")
with tf.variable_scope("train_model", reuse=True,
custom_getter=tf_util.outer_scope_getter("train_model")):
train_model = self.policy(self.sess, self.observation_space, self.action_space, self.n_envs,
self.n_steps + 1, n_batch_train, reuse=True, **self.policy_kwargs)
with tf.variable_scope("moving_average"):
# create averaged model
ema = tf.train.ExponentialMovingAverage(self.alpha)
ema_apply_op = ema.apply(self.params)
def custom_getter(getter, name, *args, **kwargs):
name = name.replace("polyak_model/", "")
val = ema.average(getter(name, *args, **kwargs))
return val
with tf.variable_scope("polyak_model", reuse=True, custom_getter=custom_getter):
self.polyak_model = polyak_model = self.policy(self.sess, self.observation_space, self.action_space,
self.n_envs, self.n_steps + 1,
self.n_envs * (self.n_steps + 1), reuse=True,
**self.policy_kwargs)
with tf.variable_scope("loss", reuse=False):
self.done_ph = tf.placeholder(tf.float32, [self.n_batch]) # dones
self.reward_ph = tf.placeholder(tf.float32, [self.n_batch]) # rewards, not returns
self.mu_ph = tf.placeholder(tf.float32, [self.n_batch, self.n_act]) # mu's
self.action_ph = train_model.pdtype.sample_placeholder([self.n_batch])
self.learning_rate_ph = tf.placeholder(tf.float32, [])
eps = 1e-6
# Notation: (var) = batch variable, (var)s = sequence variable,
# (var)_i = variable index by action at step i
# shape is [n_envs * (n_steps + 1)]
if continuous:
value = train_model.value_flat
else:
value = tf.reduce_sum(train_model.policy_proba * train_model.q_value, axis=-1)
rho, rho_i_ = None, None
if continuous:
action_ = strip(train_model.proba_distribution.sample(), self.n_envs, self.n_steps)
distribution_f = tf.contrib.distributions.MultivariateNormalDiag(
loc=strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps),
scale_diag=strip(train_model.proba_distribution.logstd, self.n_envs, self.n_steps))
f_polyak = tf.contrib.distributions.MultivariateNormalDiag(
loc=strip(polyak_model.proba_distribution.mean, self.n_envs, self.n_steps),
scale_diag=strip(polyak_model.proba_distribution.logstd, self.n_envs, self.n_steps))
f_i = distribution_f.prob(self.action_ph)
f_i_ = distribution_f.prob(action_)
f_polyak_i = f_polyak.prob(self.action_ph)
phi_i = strip(train_model.proba_distribution.mean, self.n_envs, self.n_steps)
q_value = strip(train_model.value_fn, self.n_envs, self.n_steps)
q_i = q_value[:, 0]
rho_i = tf.reshape(f_i, [-1, 1]) / (self.mu_ph + eps)
rho_i_ = tf.reshape(f_i_, [-1, 1]) / (self.mu_ph + eps)
qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, tf.pow(rho_i, 1 / self.n_act),
self.n_envs, self.n_steps, self.gamma)
else:
# strip off last step
# f is a distribution, chosen to be Gaussian distributions
# with fixed diagonal covariance and mean \phi(x)
# in the paper
distribution_f, f_polyak, q_value = \
map(lambda variables: strip(variables, self.n_envs, self.n_steps),
[train_model.policy_proba, polyak_model.policy_proba, train_model.q_value])
# Get pi and q values for actions taken
f_i = get_by_index(distribution_f, self.action_ph)
f_i_ = distribution_f
phi_i = distribution_f
f_polyak_i = f_polyak
q_i = get_by_index(q_value, self.action_ph)
# Compute ratios for importance truncation
rho = distribution_f / (self.mu_ph + eps)
rho_i = get_by_index(rho, self.action_ph)
# Calculate Q_retrace targets
qret = q_retrace(self.reward_ph, self.done_ph, q_i, value, rho_i, self.n_envs, self.n_steps,
self.gamma)
# Calculate losses
# Entropy
entropy = tf.reduce_sum(train_model.proba_distribution.entropy())
# Policy Gradient loss, with truncated importance sampling & bias correction
value = strip(value, self.n_envs, self.n_steps, True)
# check_shape([qret, value, rho_i, f_i], [[self.n_envs * self.n_steps]] * 4)
# check_shape([rho, distribution_f, q_value], [[self.n_envs * self.n_steps, self.n_act]] * 2)
# Truncated importance sampling
adv = qret - value
log_f = tf.log(f_i + eps)
# [n_envs * n_steps]
gain_f = log_f * tf.stop_gradient(adv * tf.minimum(self.correction_term, rho_i))
loss_f = -tf.reduce_mean(gain_f)
# Bias correction for the truncation
adv_bc = (q_value - tf.reshape(value, [self.n_envs * self.n_steps, 1])) # [n_envs * n_steps, n_act]
# check_shape([adv_bc, log_f_bc], [[self.n_envs * self.n_steps, self.n_act]] * 2)
if continuous:
gain_bc = tf.stop_gradient(adv_bc *
tf.nn.relu(1.0 - (self.correction_term / (rho_i_ + eps))) *
f_i_)
else:
log_f_bc = tf.log(f_i_ + eps) # / (f_old + eps)
gain_bc = tf.reduce_sum(log_f_bc *
tf.stop_gradient(
adv_bc *
tf.nn.relu(1.0 - (self.correction_term / (rho + eps))) *
f_i_),
axis=1)
# IMP: This is sum, as expectation wrt f
loss_bc = -tf.reduce_mean(gain_bc)
loss_policy = loss_f + loss_bc
# Value/Q function loss, and explained variance
check_shape([qret, q_i], [[self.n_envs * self.n_steps]] * 2)
explained_variance = q_explained_variance(tf.reshape(q_i, [self.n_envs, self.n_steps]),
tf.reshape(qret, [self.n_envs, self.n_steps]))
loss_q = tf.reduce_mean(tf.square(tf.stop_gradient(qret) - q_i) * 0.5)
# Net loss
check_shape([loss_policy, loss_q, entropy], [[]] * 3)
loss = loss_policy + self.q_coef * loss_q - self.ent_coef * entropy
tf.summary.scalar('entropy_loss', entropy)
tf.summary.scalar('policy_gradient_loss', loss_policy)
tf.summary.scalar('value_function_loss', loss_q)
tf.summary.scalar('loss', loss)
norm_grads_q, norm_grads_policy, avg_norm_grads_f = None, None, None
avg_norm_k, avg_norm_g, avg_norm_k_dot_g, avg_norm_adj = None, None, None, None
if self.trust_region:
# [n_envs * n_steps, n_act]
grad = tf.gradients(- (loss_policy - self.ent_coef * entropy) * self.n_steps * self.n_envs,
phi_i)
# [n_envs * n_steps, n_act] # Directly computed gradient of KL divergence wrt f
kl_grad = - f_polyak_i / (f_i_ + eps)
k_dot_g = tf.reduce_sum(kl_grad * grad, axis=-1)
adj = tf.maximum(0.0, (tf.reduce_sum(kl_grad * grad, axis=-1) - self.delta) / (
tf.reduce_sum(tf.square(kl_grad), axis=-1) + eps)) # [n_envs * n_steps]
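# The adjustment above follows the trust-region step from the ACER paper:
#   adj = max(0, (k . g - delta) / ||k||^2)
# and a few lines below the policy gradient is replaced with g - adj * k, where k is
# the gradient of the KL divergence w.r.t. the policy statistics f. (Comment added
# for clarity; it restates the surrounding code.)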
# Calculate stats (before doing adjustment) for logging.
avg_norm_k = avg_norm(kl_grad)
avg_norm_g = avg_norm(grad)
avg_norm_k_dot_g = tf.reduce_mean(tf.abs(k_dot_g))
avg_norm_adj = tf.reduce_mean(tf.abs(adj))
grad = grad - tf.reshape(adj, [self.n_envs * self.n_steps, 1]) * kl_grad
# These are trust region adjusted gradients wrt f, i.e. the statistics of policy pi
grads_f = -grad / (self.n_envs * self.n_steps)
grads_policy = tf.gradients(f_i_, self.params, grads_f)
grads_q = tf.gradients(loss_q * self.q_coef, self.params)
grads = [gradient_add(g1, g2, param, verbose=self.verbose)
for (g1, g2, param) in zip(grads_policy, grads_q, self.params)]
avg_norm_grads_f = avg_norm(grads_f) * (self.n_steps * self.n_envs)
norm_grads_q = tf.global_norm(grads_q)
norm_grads_policy = tf.global_norm(grads_policy)
else:
grads = tf.gradients(loss, self.params)
norm_grads = None
if self.max_grad_norm is not None:
grads, norm_grads = tf.clip_by_global_norm(grads, self.max_grad_norm)
grads = list(zip(grads, self.params))
with tf.variable_scope("input_info", reuse=False):
tf.summary.scalar('rewards', tf.reduce_mean(self.reward_ph))
tf.summary.scalar('learning_rate', tf.reduce_mean(self.learning_rate))
tf.summary.scalar('advantage', tf.reduce_mean(adv))
tf.summary.scalar('action_probability', tf.reduce_mean(self.mu_ph))
if self.full_tensorboard_log:
tf.summary.histogram('rewards', self.reward_ph)
tf.summary.histogram('learning_rate', self.learning_rate)
tf.summary.histogram('advantage', adv)
tf.summary.histogram('action_probability', self.mu_ph)
if tf_util.is_image(self.observation_space):
tf.summary.image('observation', train_model.obs_ph)
else:
tf.summary.histogram('observation', train_model.obs_ph)
trainer = tf.train.RMSPropOptimizer(learning_rate=self.learning_rate_ph, decay=self.rprop_alpha,
epsilon=self.rprop_epsilon)
_opt_op = trainer.apply_gradients(grads)
# so when you call _train, you first do the gradient step, then you apply ema
with tf.control_dependencies([_opt_op]):
_train = tf.group(ema_apply_op)
# Ops/Summaries to run, and their names for logging
assert norm_grads is not None
run_ops = [_train, loss, loss_q, entropy, loss_policy, loss_f, loss_bc, explained_variance, norm_grads]
names_ops = ['loss', 'loss_q', 'entropy', 'loss_policy', 'loss_f', 'loss_bc', 'explained_variance',
'norm_grads']
if self.trust_region:
self.run_ops = run_ops + [norm_grads_q, norm_grads_policy, avg_norm_grads_f, avg_norm_k, avg_norm_g,
avg_norm_k_dot_g, avg_norm_adj]
self.names_ops = names_ops + ['norm_grads_q', 'norm_grads_policy', 'avg_norm_grads_f', 'avg_norm_k',
'avg_norm_g', 'avg_norm_k_dot_g', 'avg_norm_adj']
self.train_model = train_model
self.step_model = step_model
self.step = step_model.step
self.proba_step = step_model.proba_step
self.initial_state = step_model.initial_state
tf.global_variables_initializer().run(session=self.sess)
self.summary = tf.summary.merge_all()
def _train_step(self, obs, actions, rewards, dones, mus, states, masks, steps, writer=None):
"""
applies a training step to the model
:param obs: ([float]) The input observations
:param actions: ([float]) The actions taken
:param rewards: ([float]) The rewards from the environment
:param dones: ([bool]) Whether or not the episode is over (aligned with reward, used for reward calculation)
:param mus: ([float]) The logits values
:param states: ([float]) The states (used for recurrent policies)
:param masks: ([bool]) Whether or not the episode is over (used for recurrent policies)
:param steps: (int) the number of steps done so far (can be None)
:param writer: (TensorFlow Summary.writer) the writer for tensorboard
:return: ([str], [float]) the list of update operation name, and the list of the results of the operations
"""
cur_lr = self.learning_rate_schedule.value_steps(steps)
td_map = {self.train_model.obs_ph: obs, self.polyak_model.obs_ph: obs, self.action_ph: actions,
self.reward_ph: rewards, self.done_ph: dones, self.mu_ph: mus, self.learning_rate_ph: cur_lr}
if states is not None:
td_map[self.train_model.states_ph] = states
td_map[self.train_model.dones_ph] = masks
td_map[self.polyak_model.states_ph] = states
td_map[self.polyak_model.dones_ph] = masks
if writer is not None:
# run loss backprop with summary, but once every 10 runs save the metadata (memory, compute time, ...)
if self.full_tensorboard_log and (1 + (steps / self.n_batch)) % 10 == 0:
run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
run_metadata = tf.RunMetadata()
step_return = self.sess.run([self.summary] + self.run_ops, td_map, options=run_options,
run_metadata=run_metadata)
writer.add_run_metadata(run_metadata, 'step%d' % steps)
else:
step_return = self.sess.run([self.summary] + self.run_ops, td_map)
writer.add_summary(step_return[0], steps)
step_return = step_return[1:]
else:
step_return = self.sess.run(self.run_ops, td_map)
return self.names_ops, step_return[1:] # strip off _train
def learn(self, total_timesteps, callback=None, log_interval=100, tb_log_name="ACER",
reset_num_timesteps=True):
new_tb_log = self._init_num_timesteps(reset_num_timesteps)
callback = self._init_callback(callback)
with SetVerbosity(self.verbose), TensorboardWriter(self.graph, self.tensorboard_log, tb_log_name, new_tb_log) \
as writer:
self._setup_learn()
self.learning_rate_schedule = Scheduler(initial_value=self.learning_rate, n_values=total_timesteps,
schedule=self.lr_schedule)
episode_stats = EpisodeStats(self.n_steps, self.n_envs)
if self.replay_ratio > 0:
buffer = Buffer(env=self.env, n_steps=self.n_steps, size=self.buffer_size)
else:
buffer = None
t_start = time.time()
callback.on_training_start(locals(), globals())
# n_batch samples, 1 on_policy call and multiple off-policy calls
for steps in range(0, total_timesteps, self.n_batch):
callback.on_rollout_start()
enc_obs, obs, actions, rewards, mus, dones, masks = self.runner.run(callback)
callback.update_locals(locals())
callback.on_rollout_end()
# Early stopping due to the callback
if not self.runner.continue_training:
break
episode_stats.feed(rewards, dones)
if buffer is not None:
buffer.put(enc_obs, actions, rewards, mus, dones, masks)
if writer is not None:
total_episode_reward_logger(self.episode_reward,
rewards.reshape((self.n_envs, self.n_steps)),
dones.reshape((self.n_envs, self.n_steps)),
writer, self.num_timesteps)
# reshape stuff correctly
obs = obs.reshape(self.runner.batch_ob_shape)
actions = actions.reshape([self.n_batch])
rewards = rewards.reshape([self.n_batch])
mus = mus.reshape([self.n_batch, self.n_act])
dones = dones.reshape([self.n_batch])
masks = masks.reshape([self.runner.batch_ob_shape[0]])
names_ops, values_ops = self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks,
self.num_timesteps, writer)
if self.verbose >= 1 and (int(steps / self.n_batch) % log_interval == 0):
logger.record_tabular("total_timesteps", self.num_timesteps)
logger.record_tabular("fps", int(steps / (time.time() - t_start)))
# IMP: In EpisodicLife env, during training, we get done=True at each loss of life,
# not just at the terminal state. Thus, this is mean until end of life, not end of episode.
# For true episode rewards, see the monitor files in the log folder.
logger.record_tabular("mean_episode_length", episode_stats.mean_length())
logger.record_tabular("mean_episode_reward", episode_stats.mean_reward())
for name, val in zip(names_ops, values_ops):
logger.record_tabular(name, float(val))
logger.dump_tabular()
if (self.replay_ratio > 0 and
buffer is not None and
buffer.has_atleast(self.replay_start)):
samples_number = np.random.poisson(self.replay_ratio)
for _ in range(samples_number):
# get obs, actions, rewards, mus, dones from buffer.
obs, actions, rewards, mus, dones, masks = buffer.get()
# reshape stuff correctly
obs = obs.reshape(self.runner.batch_ob_shape)
actions = actions.reshape([self.n_batch])
rewards = rewards.reshape([self.n_batch])
mus = mus.reshape([self.n_batch, self.n_act])
dones = dones.reshape([self.n_batch])
masks = masks.reshape([self.runner.batch_ob_shape[0]])
self._train_step(obs, actions, rewards, dones, mus, self.initial_state, masks,
self.num_timesteps)
callback.on_training_end()
return self
def save(self, save_path, cloudpickle=False):
data = {
"gamma": self.gamma,
"n_steps": self.n_steps,
"q_coef": self.q_coef,
"ent_coef": self.ent_coef,
"max_grad_norm": self.max_grad_norm,
"learning_rate": self.learning_rate,
"lr_schedule": self.lr_schedule,
"rprop_alpha": self.rprop_alpha,
"rprop_epsilon": self.rprop_epsilon,
"replay_ratio": self.replay_ratio,
"replay_start": self.replay_start,
"verbose": self.verbose,
"policy": self.policy,
"observation_space": self.observation_space,
"action_space": self.action_space,
"n_envs": self.n_envs,
'n_cpu_tf_sess': self.n_cpu_tf_sess,
'seed': self.seed,
"_vectorize_action": self._vectorize_action,
"policy_kwargs": self.policy_kwargs
}
params_to_save = self.get_parameters()
self._save_to_file(save_path, data=data, params=params_to_save, cloudpickle=cloudpickle)
class _Runner(AbstractEnvRunner):
def __init__(self, env, model, n_steps):
"""
A runner to learn the policy of an environment for a model
:param env: (Gym environment) The environment to learn from
:param model: (Model) The model to learn
:param n_steps: (int) The number of steps to run for each environment
"""
super(_Runner, self).__init__(env=env, model=model, n_steps=n_steps)
self.env = env
self.model = model
self.n_env = n_env = env.num_envs
if isinstance(env.action_space, Discrete):
self.n_act = env.action_space.n
else:
self.n_act = env.action_space.shape[-1]
self.n_batch = n_env * n_steps
if len(env.observation_space.shape) > 1:
self.raw_pixels = True
obs_height, obs_width, obs_num_channels = env.observation_space.shape
self.batch_ob_shape = (n_env * (n_steps + 1), obs_height, obs_width, obs_num_channels)
self.obs_dtype = np.uint8
self.obs = np.zeros((n_env, obs_height, obs_width, obs_num_channels), dtype=self.obs_dtype)
self.num_channels = obs_num_channels
else:
if len(env.observation_space.shape) == 1:
self.obs_dim = env.observation_space.shape[0]
else:
self.obs_dim = 1
self.raw_pixels = False
if isinstance(self.env.observation_space, Discrete):
self.batch_ob_shape = (n_env * (n_steps + 1),)
else:
self.batch_ob_shape = (n_env * (n_steps + 1), self.obs_dim)
self.obs_dtype = np.float32
self.n_steps = n_steps
self.states = model.initial_state
self.dones = [False for _ in range(n_env)]
def _run(self):
"""
Run a learning step of the model
:return: ([float], [float], [int64], [float], [float], [bool], [float])
encoded observation, observations, actions, rewards, mus, dones, masks
"""
enc_obs = [self.obs]
mb_obs, mb_actions, mb_mus, mb_dones, mb_rewards = [], [], [], [], []
for _ in range(self.n_steps):
actions, _, states, _ = self.model.step(self.obs, self.states, self.dones)
mus = self.model.proba_step(self.obs, self.states, self.dones)
mb_obs.append(np.copy(self.obs))
mb_actions.append(actions)
mb_mus.append(mus)
mb_dones.append(self.dones)
clipped_actions = actions
# Clip the actions to avoid out of bound error
if isinstance(self.env.action_space, Box):
clipped_actions = np.clip(actions, self.env.action_space.low, self.env.action_space.high)
obs, rewards, dones, _ = self.env.step(clipped_actions)
self.model.num_timesteps += self.n_envs
if self.callback is not None:
# Abort training early
self.callback.update_locals(locals())
if self.callback.on_step() is False:
self.continue_training = False
# Return dummy values
return [None] * 7
# state information for stateful models like LSTM
self.states = states
self.dones = dones
self.obs = obs
mb_rewards.append(rewards)
enc_obs.append(obs)
mb_obs.append(np.copy(self.obs))
mb_dones.append(self.dones)
enc_obs = np.asarray(enc_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_obs = np.asarray(mb_obs, dtype=self.obs_dtype).swapaxes(1, 0)
mb_actions = np.asarray(mb_actions, dtype=np.int64).swapaxes(1, 0)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).swapaxes(1, 0)
mb_mus = np.asarray(mb_mus, dtype=np.float32).swapaxes(1, 0)
mb_dones = np.asarray(mb_dones, dtype=np.bool).swapaxes(1, 0)
        mb_masks = mb_dones  # Used for stateful models like LSTMs to mask state when done
mb_dones = mb_dones[:, 1:] # Used for calculating returns. The dones array is now aligned with rewards
# shapes are now [nenv, nsteps, []]
# When pulling from buffer, arrays will now be reshaped in place, preventing a deep copy.
return enc_obs, mb_obs, mb_actions, mb_rewards, mb_mus, mb_dones, mb_masks
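if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: demonstrates the
    # (n_steps, n_env) -> (n_env, n_steps) reordering that the swapaxes(1, 0)
    # calls in _Runner._run() perform on the collected mini-batches.
    import numpy as np
    n_steps, n_env = 3, 2
    rewards_by_step = np.arange(n_steps * n_env, dtype=np.float32).reshape(n_steps, n_env)
    rewards_by_env = rewards_by_step.swapaxes(1, 0)
    assert rewards_by_env.shape == (n_env, n_steps)
    print(rewards_by_env)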
|
mit
|
team-vigir/vigir_pluginlib
|
vigir_pluginlib_manager/src/vigir_pluginlib_manager/plugin_tree_model.py
|
1
|
9877
|
#!/usr/bin/env python
import bisect
from python_qt_binding import QtCore
import rospy
import rospkg
from python_qt_binding.QtCore import Qt, QObject, QAbstractItemModel
from vigir_pluginlib_msgs.msg import PluginState, PluginDescription
# Tree Model for Plugins
class PluginTreeModel(QtCore.QAbstractItemModel):
def __init__(self, parent=None, *args):
super(PluginTreeModel, self).__init__(parent)
self._root_item = PluginTreeItem()
self._plugin_states = []
def clear(self):
self._root_item.clear()
def columnCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return parent.internalPointer().columnCount()
else:
return self._root_item.columnCount()
def rowCount(self, parent=QtCore.QModelIndex()):
if parent.isValid():
return parent.internalPointer().childCount()
else:
return self._root_item.childCount()
def headerData(self, section, orientation, role):
if orientation == QtCore.Qt.Horizontal and role == QtCore.Qt.DisplayRole:
if section == 0:
return 'Class'
elif section == 1:
return 'Name'
return None
def insertRows(self, row, count, parent=QtCore.QModelIndex()):
parent_item = self.getItem(parent)
self.beginInsertRows(parent, row, row + count - 1)
success = parent_item.insertChildren(row, count)
self.endInsertRows()
return success
def removeRows(self, row, count, parent=QtCore.QModelIndex()):
parent_item = self.getItem(parent)
self.beginRemoveRows(parent, row, row + count - 1)
success = parent_item.removeChildren(row, count)
self.endRemoveRows()
# remove empty branch
if success and parent_item.parentItem() and not parent_item.childCount():
return self.removeRows(parent_item.childNumber(), 1)
return success
def addBranch(self, base_class):
        # check if the branch already exists
branch = self.findBranch(base_class)
if branch.isValid():
return branch
state = PluginState()
state.description.base_class = base_class
        # add new branch, keeping children sorted by base class (see the illustrative sketch at the end of this file)
temp_list = [child.getPluginState().description.base_class for child in self._root_item.childs()]
position = bisect.bisect(temp_list, base_class)
if self.insertRows(position, 1):
branch_item = self._root_item.child(position)
branch_item.setData(state)
return self.index(position, 0)
return QtCore.QModelIndex()
def addItem(self, state):
# search for branch with type_class
branch = self.addBranch(state.description.base_class)
branch_item = self.getItem(branch)
        # check if the child already exists
child = self.findChild(state.description, branch)
if child.isValid():
return child
# add new item to branch
entry = (state.description.type_class, state.description.name)
temp_list = [(child.getPluginState().description.type_class, child.getPluginState().description.name) for child in branch_item.childs()]
position = bisect.bisect(temp_list, entry)
if self.insertRows(position, 1, branch):
child_item = branch_item.child(position)
child_item.setData(state)
return self.index(position, 0, branch)
return QtCore.QModelIndex()
def getItem(self, index=QtCore.QModelIndex()):
if index.isValid():
return index.internalPointer()
else:
return self._root_item
def setData(self, data):
self.clear()
self.updateData(data)
def updateData(self, data):
# update empty entries to keep UI tidy
for state in data:
if not state.description.base_class:
state.description.base_class = 'Unknown'
if not state.description.type_class:
state.description.type_class = 'Unknown'
        # collect entries which no longer exist in the received update
rows = []
for branch_item in self._root_item.childs():
branch_index = self.index(branch_item.childNumber(), 0)
for child_item in branch_item.childs():
result = filter(lambda state: state.description == child_item.getPluginState().description, data)
if not result:
rows.append((child_item.childNumber(), branch_index))
# remove obsolete entries, TODO: check for branch!
rows.sort(reverse=True)
for row in rows:
self.removeRows(row[0], 1, row[1])
# adding entries
for state in data:
# adding new entries
self.addItem(state)
def data(self, index, role):
if not index.isValid():
return None
elif role == QtCore.Qt.DisplayRole:
return index.internalPointer().data(index.column())
else:
return None
def flags(self, index=QtCore.QModelIndex()):
if index.isValid():
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable | super(PluginTreeModel, self).flags(index) #QtCore.Qt.ItemIsEditable
else:
return QtCore.Qt.NoItemFlags
def index(self, row, column, parent=QtCore.QModelIndex()):
if not self.hasIndex(row, column, parent):
return QtCore.QModelIndex()
if parent.isValid() and parent.column() != 0:
return QtCore.QModelIndex()
parent_item = self.getItem(parent)
child_item = parent_item.child(row)
if child_item:
return self.createIndex(row, column, child_item)
else:
return QtCore.QModelIndex()
def parent(self, index=QtCore.QModelIndex()):
if not index.isValid():
return QtCore.QModelIndex()
child_item = self.getItem(index)
parent_item = child_item.parentItem()
if parent_item == self._root_item:
return QtCore.QModelIndex()
return self.createIndex(parent_item.childNumber(), 0, parent_item)
    def findChild(self, description, parent=QtCore.QModelIndex()):
        parent_item = self.getItem(parent)
        if parent_item == self._root_item:
            parent = self.findBranch(description.base_class, parent)
            if not parent.isValid():
                return QtCore.QModelIndex()
            # look up the child within the resolved branch rather than the root item
            parent_item = self.getItem(parent)
        child_item = parent_item.findChild(description)
if child_item:
return self.index(child_item.childNumber(), 0, parent)
else:
return QtCore.QModelIndex()
def findBranch(self, base_class, parent=QtCore.QModelIndex()):
parent_item = self.getItem(parent)
child_item = parent_item.findBranch(base_class)
if child_item:
return self.index(child_item.childNumber(), 0, parent)
else:
return QtCore.QModelIndex()
def expandChildren(self, index, view):
if not index.isValid():
return
for i in range(0, index.model().rowCount(index)):
child = index.child(i, 0)
self.expandChildren(child, view)
if not view.expanded(index):
view.expand(index)
    def expandAll(self, view):
        self.expandChildren(self.createIndex(0, 0, self._root_item), view)
# Tree Item for Plugins
class PluginTreeItem:
def __init__(self, state=PluginState(), parent=None):
self._parent_item = parent
self._child_items = []
self._plugin_state = PluginState()
self.setData(state)
def clear(self):
for child in self._child_items:
child.clear()
self._child_items = []
self._plugin_state = PluginState()
def child(self, row):
if row < self.childCount():
return self._child_items[row]
else:
return None
def childs(self):
return self._child_items
def childCount(self):
return len(self._child_items)
def childNumber(self):
if self._parent_item is not None:
return self._parent_item._child_items.index(self)
return 0
def insertChildren(self, position, count):
if position < 0 or position > self.childCount():
return False
for row in range(0, count):
self._child_items.insert(position, PluginTreeItem(parent=self))
return True
def removeChildren(self, position, count):
if position < 0 or position > self.childCount():
return False
del self._child_items[position:position+count]
return True
def columnCount(self):
return 2
def data(self, column):
if column == 0:
if self.childCount() > 0:
return self._plugin_state.description.base_class
else:
return self._plugin_state.description.type_class
elif column == 1:
if self.childCount() > 0:
return ""
else:
return self._plugin_state.description.name
else:
return None
def setData(self, state):
self._plugin_state = state
def getPluginState(self):
return self._plugin_state
def findChild(self, description):
child_item = filter(lambda child: child.getPluginState().description == description, self._child_items)
if not child_item:
return None
else:
return child_item[0]
def findBranch(self, base_class):
branch_item = filter(lambda child: child.getPluginState().description.base_class == base_class, self._child_items)
if not branch_item:
return None
else:
return branch_item[0]
def parentItem(self):
return self._parent_item
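if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: shows the
    # bisect-based ordered insertion that addBranch()/addItem() use above to keep
    # branches and plugins sorted, using hypothetical plugin class names instead
    # of PluginState messages.
    sorted_classes = ['ControllerPlugin', 'FootstepPlanningPlugin']
    new_class = 'CollisionCheckPlugin'
    position = bisect.bisect(sorted_classes, new_class)
    sorted_classes.insert(position, new_class)
    print(sorted_classes)  # ['CollisionCheckPlugin', 'ControllerPlugin', 'FootstepPlanningPlugin']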
|
bsd-3-clause
|
pombredanne/pyjs
|
pyjswidgets/pyjamas/Canvas2D.py
|
7
|
3794
|
# Canvas wrapper component for Pyjamas
# Ported by Willie Gollino from Canvas component for GWT - Originally by Alexei Sokolov http://gwt.components.googlepages.com/
#
# Canvas API reference:
# http://developer.apple.com/documentation/AppleApplications/Reference/SafariJSRef/Classes/Canvas.html#//apple_ref/js/Canvas.clearRect
#
# Usage Notes:
# - IE support requires ExplorerCanvas from excanvas.sourceforge.net
# - place excanvas.js in your apps public folder
# - add this to your MainModule.html: <!--[if IE]><script src="excanvas.js" type="text/javascript"></script><![endif]-->
from pyjamas import DOM
from pyjamas.ui.Image import Image
from pyjamas.ui.FocusWidget import FocusWidget
from pyjamas.ui import Event
from pyjamas.ui import MouseListener
from pyjamas.ui import KeyboardListener
from pyjamas.ui import Focus
from pyjamas.ui import FocusListener
from __pyjamas__ import JS
class Canvas(FocusWidget):
def __init__(self, Width=0, Height=0, **kwargs):
        if 'StyleName' not in kwargs:
kwargs['StyleName'] = 'gwt-Canvas'
kwargs['Width'] = Width
kwargs['Height'] = Height
self.context = None
focusable = Focus.createFocusable()
self.canvas = DOM.createElement("canvas")
DOM.appendChild(focusable, self.canvas)
FocusWidget.__init__(self, focusable, **kwargs)
self.init()
self.context.fillStyle = "black"
self.context.strokeStyle = "black"
#add onImageLoad, since some listeners use it
self.onImageLoad = self.onLoad
def setWidth(self, width):
FocusWidget.setWidth(self, width)
self.canvas.width = width
def setHeight(self, height):
FocusWidget.setHeight(self, height)
self.canvas.height = height
def getContext(self):
return self.context
def isEmulation(self):
return False
def init(self):
el = self.getElement().firstChild
ctx = el.getContext("2d")
"""
ctx._createPattern = ctx.createPattern
ctx.createPattern = function(img, rep) {
if (!(img instanceof Image)) img = img.getElement();
return self._createPattern(img, rep)
}
ctx._drawImage = ctx.drawImage
ctx.drawImage = function() {
var a=arguments
if (!(a[0] instanceof Image)) a[0] = a[0].getElement()
if (a.length==9) return self._drawImage(a[0], a[1], a[2], a[3], a[4], a[5], a[6], a[7], a[8])
else if (a.length==5) return self._drawImage(a[0], a[1], a[2], a[3], a[4])
return self._drawImage(a[0], a[1], a[2])
}
"""
self.context = ctx
class CanvasImage(Image):
def __init__(self, url="", load_listener = None):
Image.__init__(self, url)
if load_listener:
self.addLoadListener(load_listener)
self.onAttach()
def isLoaded(self):
return self.getElement().complete
class ImageLoadListener:
def __init__(self, listener = None):
self.wait_list = []
self.loadListeners = []
if listener:
self.addLoadListener(listener)
self.onImageLoad = self.onLoad
def add(self, sender):
self.wait_list.append(sender)
sender.addLoadListener(self)
def addLoadListener(self, listener):
self.loadListeners.append(listener)
def isLoaded(self):
if len(self.wait_list):
return False
return True
def onError(self, sender):
for listener in self.loadListeners:
listener.onError(sender)
def onLoad(self, sender):
self.wait_list.remove(sender)
if self.isLoaded():
for listener in self.loadListeners:
listener.onLoad(self)
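if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: mimics the
    # ImageLoadListener wait-list pattern above with minimal stand-in objects
    # instead of pyjamas Image widgets.
    class _FakeImage(object):
        def addLoadListener(self, listener):
            self.listener = listener
    class _PrintListener(object):
        def onLoad(self, sender):
            print('all images loaded')
        def onError(self, sender):
            print('an image failed to load')
    group = ImageLoadListener(_PrintListener())
    images = [_FakeImage(), _FakeImage()]
    for img in images:
        group.add(img)
    for img in images:
        group.onLoad(img)  # simulate the browser firing the load events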
|
apache-2.0
|
tavaresdong/cs61a-projects
|
projects/ants/tests/09.py
|
4
|
10915
|
test = {
'name': 'Problem 9',
'points': 4,
'suites': [
{
'cases': [
{
'code': r"""
>>> # QueenAnt Placement
>>> queen = ants.QueenAnt()
>>> impostor = ants.QueenAnt()
>>> front_ant, back_ant = ants.ThrowerAnt(), ants.ThrowerAnt()
>>> tunnel = [colony.places['tunnel_0_{0}'.format(i)]
... for i in range(9)]
>>> tunnel[1].add_insect(back_ant)
>>> tunnel[7].add_insect(front_ant)
>>> tunnel[4].add_insect(impostor)
>>> impostor.action(colony)
>>> impostor.armor # Impostors must die!
0
>>> tunnel[4].ant is None
True
>>> back_ant.damage # Ants should not be buffed
1
>>> front_ant.damage
1
>>> tunnel[4].add_insect(queen)
>>> queen.action(colony)
>>> queen.armor # Long live the Queen!
1
>>> back_ant.damage # Ants behind queen should be buffed
2
>>> front_ant.damage
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # QueenAnt Removal
>>> queen = ants.QueenAnt()
>>> impostor = ants.QueenAnt()
>>> place = colony.places['tunnel_0_2']
>>> place.add_insect(impostor)
>>> place.remove_insect(impostor)
>>> place.ant is None # Impostors can be removed
True
>>> place.add_insect(queen)
>>> place.remove_insect(queen)
>>> place.ant is queen # True queen cannot be removed
True
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # QueenAnt knows how to swim
>>> queen = ants.QueenAnt()
>>> water = ants.Water('Water')
>>> water.add_insect(queen)
>>> queen.armor
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing damage multiplier
>>> queen_tunnel, side_tunnel = [[colony.places['tunnel_{0}_{1}'.format(i, j)]
... for j in range(9)] for i in range(2)]
>>> queen = ants.QueenAnt()
>>> back = ants.ThrowerAnt()
>>> front = ants.ThrowerAnt()
>>> guard = ants.BodyguardAnt()
>>> guarded = ants.ThrowerAnt()
>>> side = ants.ThrowerAnt()
>>> bee = ants.Bee(10)
>>> side_bee = ants.Bee(10)
>>> queen_tunnel[0].add_insect(back)
>>> queen_tunnel[1].add_insect(guard)
>>> queen_tunnel[1].add_insect(guarded)
>>> queen_tunnel[2].add_insect(queen)
>>> queen_tunnel[3].add_insect(front)
>>> side_tunnel[0].add_insect(side)
>>> queen_tunnel[4].add_insect(bee)
>>> side_tunnel[4].add_insect(side_bee)
>>> queen.action(colony)
>>> bee.armor
9
>>> back.action(colony)
>>> bee.armor
7
>>> front.action(colony)
>>> bee.armor
6
>>> guard.action(colony)
>>> bee.armor
4
>>> side.action(colony)
>>> side_bee.armor
9
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import ants, importlib
>>> importlib.reload(ants)
>>> hive = ants.Hive(ants.AssaultPlan())
>>> dimensions = (2, 9)
>>> colony = ants.AntColony(None, hive, ants.ant_types(),
... ants.dry_layout, dimensions)
>>> ants.bees_win = lambda: None
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> # Testing game over
>>> queen = ants.QueenAnt()
>>> impostor = ants.QueenAnt()
>>> tunnel = [colony.places['tunnel_0_{0}'.format(i)]
... for i in range(9)]
>>> tunnel[4].add_insect(queen)
>>> tunnel[6].add_insect(impostor)
>>> bee = ants.Bee(3)
>>> tunnel[6].add_insect(bee) # Bee in place with impostor
>>> bee.action(colony) # Game should not end
>>> bee.move_to(tunnel[4]) # Bee moved to place with true queen
>>> bee.action(colony) # Game should end
BeesWinException
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing if queen will not crash with no one to buff
>>> queen = ants.QueenAnt()
>>> colony.places['tunnel_0_2'].add_insect(queen)
>>> queen.action(colony)
>>> # Attack a bee
>>> bee = ants.Bee(3)
>>> colony.places['tunnel_0_4'].add_insect(bee)
>>> queen.action(colony)
>>> bee.armor # Queen should still hit the bee
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Testing QueenAnt action method
>>> queen = ants.QueenAnt()
>>> impostor = ants.QueenAnt()
>>> bee = ants.Bee(10)
>>> ant = ants.ThrowerAnt()
>>> colony.places['tunnel_0_0'].add_insect(ant)
>>> colony.places['tunnel_0_1'].add_insect(queen)
>>> colony.places['tunnel_0_2'].add_insect(impostor)
>>> colony.places['tunnel_0_4'].add_insect(bee)
>>> impostor.action(colony)
>>> bee.armor # Impostor should not damage bee
10
>>> ant.damage # Impostor should not double damage
1
>>> queen.action(colony)
>>> bee.armor # Queen should damage bee
9
>>> ant.damage # Queen should double damage
2
>>> ant.action(colony)
>>> bee.armor # If failed, ThrowerAnt has incorrect damage
7
>>> queen.armor # Long live the Queen
1
>>> impostor.armor # Short-lived impostor
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Extensive damage doubling tests
>>> queen_tunnel, side_tunnel = [[colony.places['tunnel_{0}_{1}'.format(i, j)]
... for j in range(9)] for i in range(2)]
>>> queen = ants.QueenAnt()
>>> queen_tunnel[7].add_insect(queen)
>>> # Turn 0
>>> thrower = ants.ThrowerAnt()
>>> fire = ants.FireAnt()
>>> ninja = ants.NinjaAnt()
>>> side = ants.ThrowerAnt()
>>> front = ants.NinjaAnt()
>>> queen_tunnel[0].add_insect(thrower)
>>> queen_tunnel[1].add_insect(fire)
>>> queen_tunnel[2].add_insect(ninja)
>>> queen_tunnel[8].add_insect(front)
>>> side_tunnel[0].add_insect(side)
>>> buffed_ants = [thrower, fire, ninja]
>>> old_dmgs = [ant.damage for ant in buffed_ants]
>>> queen.action(colony)
>>> for ant, dmg in zip(buffed_ants, old_dmgs):
... assert ant.damage == dmg * 2,\
... "{0}'s damage is {1}, but should be {2}".format(ant, ant.damage, dmg * 2)
>>> for ant in [side, front]:
... assert ant.damage == dmg,\
... "{0}'s damage is {1}, but should be {2}".format(ant, ant.damage, dmg)
>>> assert queen.damage == 1,\
... 'QueenAnt damage was modified to {0}'.format(ant.damage)
>>> # Turn 1
>>> tank = ants.TankAnt()
>>> guard = ants.BodyguardAnt()
>>> queen_tank = ants.TankAnt()
>>> queen_tunnel[6].add_insect(tank) # Not protecting an ant
>>> queen_tunnel[1].add_insect(guard) # Guarding FireAnt
>>> queen_tunnel[7].add_insect(queen_tank) # Guarding QueenAnt
>>> buffed_ants.extend([tank, guard])
>>> old_dmgs.extend([ant.damage for ant in [tank, guard, queen_tank]])
>>> queen.action(colony)
>>> for ant, dmg in zip(buffed_ants, old_dmgs):
... assert ant.damage == dmg * 2,\
... "{0}'s damage is {1}, but should be {2}".format(ant, ant.damage, dmg * 2)
>>> # Turn 2
>>> thrower1 = ants.ThrowerAnt()
>>> thrower2 = ants.ThrowerAnt()
>>> queen_tunnel[6].add_insect(thrower1) # Add thrower1 in TankAnt
>>> queen_tunnel[5].add_insect(thrower2)
>>> buffed_ants.extend([thrower1, thrower2])
>>> old_dmgs.extend([ant.damage for ant in [thrower1, thrower2]])
>>> queen.action(colony)
>>> for ant, dmg in zip(buffed_ants, old_dmgs):
... assert ant.damage == dmg * 2,\
... "{0}'s damage is {1}, but should be {2}".format(ant, ant.damage, dmg * 2)
>>> # Turn 3
>>> tank.reduce_armor(tank.armor) # Expose thrower1
>>> queen.action(colony)
>>> for ant, dmg in zip(buffed_ants, old_dmgs):
... assert ant.damage == dmg * 2,\
... "{0}'s damage is {1}, but should be {2}".format(ant, ant.damage, dmg * 2)
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> # Adding/Removing QueenAnt with Container
>>> queen = ants.QueenAnt()
>>> impostor = ants.QueenAnt()
>>> container = ants.TankAnt()
>>> colony.places['tunnel_0_3'].add_insect(container)
>>> colony.places['tunnel_0_3'].add_insect(impostor)
>>> impostor.action(colony)
>>> colony.places['tunnel_0_3'].ant is container
True
>>> container.place is colony.places['tunnel_0_3']
True
>>> container.ant is None
True
>>> impostor.place is None
True
>>> colony.places['tunnel_0_3'].add_insect(queen)
>>> colony.places['tunnel_0_3'].remove_insect(queen)
>>> container.ant is queen
True
>>> queen.place is colony.places['tunnel_0_3']
True
>>> queen.action(colony)
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> import ants, importlib
>>> importlib.reload(ants)
>>> hive = ants.Hive(ants.AssaultPlan())
>>> dimensions = (2, 9)
>>> colony = ants.AntColony(None, hive, ants.ant_types(),
... ants.dry_layout, dimensions)
""",
'teardown': '',
'type': 'doctest'
}
]
}
|
mit
|
qwertyjune/BethSaidaBible
|
venv/lib/python2.7/site-packages/django/contrib/contenttypes/migrations/0001_initial.py
|
83
|
1061
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='ContentType',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('app_label', models.CharField(max_length=100)),
('model', models.CharField(max_length=100, verbose_name='python model class name')),
],
options={
'ordering': ('name',),
'db_table': 'django_content_type',
'verbose_name': 'content type',
'verbose_name_plural': 'content types',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='contenttype',
unique_together=set([('app_label', 'model')]),
),
]
|
gpl-3.0
|
applitools/eyes.selenium.python
|
applitools/selenium/capture/eyes_webdriver_screenshot.py
|
1
|
9401
|
from __future__ import absolute_import
import base64
import typing as tp
from selenium.common.exceptions import WebDriverException
from applitools.core import EyesScreenshot, EyesError, Point, Region, OutOfBoundsError
from applitools.utils import image_utils
from applitools.selenium import eyes_selenium_utils
from applitools.selenium.frames import FrameChain
if tp.TYPE_CHECKING:
from PIL import Image
from applitools.utils.custom_types import ViewPort
from applitools.selenium import EyesWebDriver
class EyesWebDriverScreenshot(EyesScreenshot):
@staticmethod
def create_from_base64(screenshot64, driver):
"""
Creates an instance from the base64 data.
:param screenshot64: The base64 representation of the png bytes.
:param driver: The webdriver for the session.
"""
return EyesWebDriverScreenshot(driver, screenshot64=screenshot64)
@staticmethod
def create_from_image(screenshot, driver):
# type: (Image.Image, EyesWebDriver) -> EyesWebDriverScreenshot
"""
        Creates an instance from an image instance.
:param screenshot: The screenshot image.
:param driver: The webdriver for the session.
"""
return EyesWebDriverScreenshot(driver, screenshot=screenshot)
def __init__(self, driver, screenshot=None, screenshot64=None,
is_viewport_screenshot=None, frame_location_in_screenshot=None):
# type: (EyesWebDriver, Image.Image, None, tp.Optional[bool], tp.Optional[Point]) -> None
"""
Initializes a Screenshot instance. Either screenshot or screenshot64 must NOT be None.
Should not be used directly. Use create_from_image/create_from_base64 instead.
:param driver: EyesWebDriver instance which handles the session from which the screenshot
was retrieved.
:param screenshot: image instance. If screenshot64 is None,
this variable must NOT be none.
:param screenshot64: The base64 representation of a png image. If screenshot
is None, this variable must NOT be none.
:param is_viewport_screenshot: Whether the screenshot object represents a
viewport screenshot or a full screenshot.
:param frame_location_in_screenshot: The location of the frame relative
to the top,left of the screenshot.
:raise EyesError: If the screenshots are None.
"""
if screenshot is None and screenshot64 is None:
raise EyesError("both screenshot and screenshot64 are None!")
if screenshot64:
screenshot = image_utils.image_from_bytes(base64.b64decode(screenshot64))
# initializing of screenshot
super(EyesWebDriverScreenshot, self).__init__(image=screenshot)
self._driver = driver
self._viewport_size = driver.get_default_content_viewport_size(force_query=False) # type: ViewPort
self._frame_chain = driver.frame_chain.clone()
if self._frame_chain:
chain_len = len(self._frame_chain)
self._frame_size = self._frame_chain[chain_len - 1].outer_size
else:
try:
self._frame_size = driver.get_entire_page_size()
except WebDriverException:
# For Appium, we can't get the "entire page size", so we use the viewport size.
self._frame_size = self._viewport_size
# For native Appium Apps we can't get the scroll position, so we use (0,0)
try:
self._scroll_position = driver.get_current_position()
except (WebDriverException, EyesError):
self._scroll_position = Point(0, 0)
if is_viewport_screenshot is None:
is_viewport_screenshot = (self._screenshot.width <= self._viewport_size['width']
and self._screenshot.height <= self._viewport_size['height'])
self._is_viewport_screenshot = is_viewport_screenshot
if frame_location_in_screenshot is None:
if self._frame_chain:
frame_location_in_screenshot = EyesWebDriverScreenshot \
.calc_frame_location_in_screenshot(self._frame_chain, is_viewport_screenshot)
else:
# The frame is the default content
frame_location_in_screenshot = Point(0, 0)
if self._is_viewport_screenshot:
frame_location_in_screenshot.offset(-self._scroll_position.x,
-self._scroll_position.y)
self._frame_location_in_screenshot = frame_location_in_screenshot
self._frame_screenshot_intersect = Region(frame_location_in_screenshot.x,
frame_location_in_screenshot.y,
self._frame_size['width'],
self._frame_size['height'])
self._frame_screenshot_intersect.intersect(Region(width=self._screenshot.width,
height=self._screenshot.height))
@staticmethod
def calc_frame_location_in_screenshot(frame_chain, is_viewport_screenshot):
first_frame = frame_chain[0]
location_in_screenshot = Point(first_frame.location['x'], first_frame.location['y'])
# We only need to consider the scroll of the default content if the screenshot is a
# viewport screenshot. If this is a full page screenshot, the frame location will not
# change anyway.
if is_viewport_screenshot:
location_in_screenshot.x -= first_frame.parent_scroll_position.x
location_in_screenshot.y -= first_frame.parent_scroll_position.y
# For inner frames we must calculate the scroll
inner_frames = frame_chain[1:]
for frame in inner_frames:
location_in_screenshot.x += frame.location['x'] - frame.parent_scroll_position.x
location_in_screenshot.y += frame.location['y'] - frame.parent_scroll_position.y
return location_in_screenshot
@property
def frame_chain(self):
return self._frame_chain
def get_base64(self):
if not self._screenshot64:
self._screenshot64 = image_utils.get_base64(self._screenshot)
return self._screenshot64
def get_location_relative_to_frame_viewport(self, location):
result = {'x': location['x'], 'y': location['y']}
if self._frame_chain or self._is_viewport_screenshot:
result['x'] -= self._scroll_position.x
result['y'] -= self._scroll_position.y
return result
def get_sub_screenshot_by_region(self, region):
sub_screenshot_region = self.get_intersected_region(region)
if sub_screenshot_region.is_empty():
raise OutOfBoundsError("Region {0} is out of bounds!".format(region))
# If we take a screenshot of a region inside a frame, then the frame's (0,0) is in the
        # negative offset of the region.
sub_screenshot_frame_location = Point(-region.left, -region.top)
# FIXME Calculate relative region location? (same as the java version)
screenshot = image_utils.get_image_part(self._screenshot, sub_screenshot_region)
return EyesWebDriverScreenshot(self._driver, screenshot,
is_viewport_screenshot=self._is_viewport_screenshot,
frame_location_in_screenshot=sub_screenshot_frame_location)
def get_element_region_in_frame_viewport(self, element):
location, size = element.location, element.size
relative_location = self.get_location_relative_to_frame_viewport(location)
x, y = relative_location['x'], relative_location['y']
width, height = size['width'], size['height']
# We only care about the part of the element which is in the viewport.
if x < 0:
diff = -x
# IMPORTANT the diff is between the original location and the viewport's bounds.
width -= diff
x = 0
if y < 0:
diff = -y
height -= diff
y = 0
if width <= 0 or height <= 0:
raise OutOfBoundsError("Element's region is outside the viewport! [(%d, %d) %d x %d]" %
(location['x'], location['y'], size['width'], size['height']))
return Region(x, y, width, height)
def get_intersected_region(self, region):
region_in_screenshot = region.clone()
region_in_screenshot.left += self._frame_location_in_screenshot.x
region_in_screenshot.top += self._frame_location_in_screenshot.y
region_in_screenshot.intersect(self._frame_screenshot_intersect)
return region_in_screenshot
def get_viewport_screenshot(self):
        # if the screenshot is a full-page screenshot, crop it down to the viewport
if not self._is_viewport_screenshot and not eyes_selenium_utils.is_mobile_device(self._driver):
return self.get_sub_screenshot_by_region(
Region(top=self._scroll_position.y, height=self._viewport_size['height'],
width=self._viewport_size['width']))
return self
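if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: mirrors the
    # arithmetic of calc_frame_location_in_screenshot() above with plain dicts,
    # assuming a viewport screenshot and a hypothetical two-level frame chain.
    frames = [
        {'location': {'x': 10, 'y': 20}, 'parent_scroll': {'x': 0, 'y': 5}},
        {'location': {'x': 3, 'y': 4}, 'parent_scroll': {'x': 1, 'y': 1}},
    ]
    x = frames[0]['location']['x'] - frames[0]['parent_scroll']['x']
    y = frames[0]['location']['y'] - frames[0]['parent_scroll']['y']
    for frame in frames[1:]:
        x += frame['location']['x'] - frame['parent_scroll']['x']
        y += frame['location']['y'] - frame['parent_scroll']['y']
    print((x, y))  # expected: (12, 18)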
|
apache-2.0
|
spMohanty/TranslatorsDesk
|
translatorsdesk/public/views.py
|
2
|
8582
|
# -*- coding: utf-8 -*-
'''Public section, including homepage and signup.'''
from flask import (Blueprint, request, render_template, flash, url_for,
                   redirect, session, jsonify, current_app, abort)
from flask.ext.login import login_user, login_required, logout_user
from translatorsdesk.extensions import login_manager
from translatorsdesk.user.models import User
from translatorsdesk.public.forms import LoginForm
from translatorsdesk.user.forms import RegisterForm
from translatorsdesk.utils import flash_errors
from translatorsdesk.database import db
import translatorsdesk.worker_functions as worker_functions
import polib
import datetime, uuid, os
blueprint = Blueprint('public', __name__, static_folder="../static")
from rq import Queue
from redis import Redis
# TODO: Change this to a Redis connection pool
redis_conn = Redis()
q = Queue(connection=redis_conn)
@login_manager.user_loader
def load_user(id):
return User.get_by_id(int(id))
@blueprint.route("/", methods=["GET", "POST"])
def home():
form = LoginForm(request.form)
# Handle logging in
if request.method == 'POST':
if form.validate_on_submit():
login_user(form.user)
flash("You are logged in.", 'success')
redirect_url = request.args.get("next") or url_for("user.members")
return redirect(redirect_url)
else:
flash_errors(form)
return render_template("public/home.html", form=form)
@blueprint.route('/logout/')
@login_required
def logout():
logout_user()
flash('You are logged out.', 'info')
return redirect(url_for('public.home'))
@blueprint.route("/register/", methods=['GET', 'POST'])
def register():
form = RegisterForm(request.form, csrf_enabled=False)
if form.validate_on_submit():
new_user = User.create(username=form.username.data,
email=form.email.data,
password=form.password.data,
active=True)
flash("Thank you for registering. You can now log in.", 'success')
return redirect(url_for('public.home'))
else:
flash_errors(form)
return render_template('public/register.html', form=form)
@blueprint.route("/about/")
def about():
form = LoginForm(request.form)
return render_template("public/about.html", form=form)
"""
Handles file uploads
"""
@blueprint.route('/upload', methods=['POST'])
def upload():
if request.method == 'POST':
file = request.files.get('file', None)
raw_text = request.values.get("raw_text", None)
print file
print raw_text
if file:
if _allowed_file(file.filename):
_uuid = str(uuid.uuid4())
secure_filename = file.filename.replace('/', "_").replace('\\', '_')
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], _uuid, secure_filename)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
file.save(filepath)
else:
return jsonify({"success": False, "message": "File Type not supported yet!!"})
elif raw_text:
print "I am in raw_text"
_uuid = str(uuid.uuid4())
secure_filename = "raw_text.txt"
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], _uuid, secure_filename)
if not os.path.exists(os.path.dirname(filepath)):
os.makedirs(os.path.dirname(filepath))
print "Made dir"
f = open(filepath, 'w')
f.write(raw_text)
f.close()
print "made file"
if file or raw_text:
## Add Job to Queue
src = request.values.get("src", None)
tgt = request.values.get("tgt", None)
print src, tgt
if not (src and tgt):
return jsonify({"success": False, "message": "Source and Target Languages not specified!!"})
#CLEAN SRC AND TGT VAR
src = src.strip('\n').strip('\r').strip()
tgt = tgt.strip('\n').strip('\r').strip()
job = q.enqueue_call(func=worker_functions.process_input_file, args=(filepath, src, tgt))
return jsonify({"success":True, "filename":secure_filename, "uuid": _uuid })
else:
return jsonify({"success": False, "message": "Corrupt File"})
def _allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1] in current_app.config['ALLOWED_FILE_EXTENSIONS']
"""
Helper functions for Translate
"""
"""
Checks if a uid, fileName pair exists
"""
def fileExists(uid, fileName):
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
if os.path.exists(filepath):
return True
else:
return False
"""
Checks if the XLIFF file for a uid, fileName pair exists
Note: This assumes that the uid and fileName pair exists
"""
def fileXLIFFExists(uid, fileName):
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".xlf")
if os.path.exists(filepath):
return True
else:
return False
def returnFileData(uid, fileName):
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
f = open(filepath, 'r')
data = f.read()
f.close()
return data
def get_redis_connection():
return Redis()
"""
Handles Computer Assisted Translation of a particular xliff file
"""
@blueprint.route('/translate/<uid>/<fileName>/', methods=['GET'])
def translate(uid, fileName):
##Check if the uid and filename exists
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
if len(_status) >0 and (_status[0]=="GENERATING_TRANSLATED_PO_FILE:::COMPLETE" or _status[0].startswith("OUTPUT_FILE_GENERATED") ) :
if fileExists(uid, fileName):
if(fileExists(uid, fileName+".po")):
po = polib.pofile(os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".po"))
valid_entries = [e for e in po if not e.obsolete]
d = []
for entry in valid_entries:
if entry.msgid.strip() != "":
_tgt_lang = r_conn.lrange("lang_"+uid+"/"+fileName, 0, -1)
d.append({"src":entry.msgid,"tgt":entry.msgstr,"tgt_lang":_tgt_lang[0]})
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
return render_template('public/translate.html',\
fileName=fileName,
uid=uid,
status = _status,
PO = {'po':True, 'data':d}
)
else:
return abort(404)
else:
return abort(404)
else:
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
return render_template('public/translate.html',\
fileName=fileName,
uid=uid,
status=_status,
PO = False
)
import subprocess
@blueprint.route('/preview', methods=['POST'])
def preview():
data = request.json
fileName = data['fileName']
uid = data['uid']
po = polib.POFile()
for _d in data['data']:
_msgid = _d['src'].strip()
_msgstr = _d['tgt'].strip()
entry = polib.POEntry(
msgid=unicode(_msgid),
msgstr=unicode(_msgstr),
)
po.append(entry)
print data
po.save(os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName+".updated.po"))
filepath = os.path.join(current_app.config['UPLOAD_FOLDER'], uid, fileName)
job = q.enqueue_call(func=worker_functions.generateOutputFile, args=(filepath,))
return "#";
@blueprint.route('/status/<uid>/<fileName>', methods=['GET'])
def status(uid, fileName):
r_conn = get_redis_connection()
_status = r_conn.lrange("state_"+uid+"/"+fileName, 0, -1)
if len(_status) > 0 and _status[0].startswith("OUTPUT_FILE_GENERATED"):
return jsonify({'file':_status[0].split(":::")[-1], 'fileReady':True})
else:
return jsonify({'fileReady':False})
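if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: demonstrates the
    # extension check performed by _allowed_file(), with a hypothetical whitelist
    # standing in for current_app.config['ALLOWED_FILE_EXTENSIONS'].
    allowed_extensions = set(['txt', 'docx', 'odt'])
    for name in ['report.txt', 'archive.tar.gz', 'noextension']:
        ok = '.' in name and name.rsplit('.', 1)[1] in allowed_extensions
        print('%s -> %s' % (name, ok))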
|
bsd-3-clause
|
raschuetz/foundations-homework
|
07/data-analysis/lib/python3.5/site-packages/pip/_vendor/requests/packages/chardet/euctwprober.py
|
2994
|
1676
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .mbcharsetprober import MultiByteCharSetProber
from .codingstatemachine import CodingStateMachine
from .chardistribution import EUCTWDistributionAnalysis
from .mbcssm import EUCTWSMModel
class EUCTWProber(MultiByteCharSetProber):
def __init__(self):
MultiByteCharSetProber.__init__(self)
self._mCodingSM = CodingStateMachine(EUCTWSMModel)
self._mDistributionAnalyzer = EUCTWDistributionAnalysis()
self.reset()
def get_charset_name(self):
return "EUC-TW"
|
mit
|
ewels/genomics-status
|
status/production.py
|
2
|
10980
|
""" Handlers related to data production.
"""
from collections import OrderedDict
import cStringIO
from datetime import datetime
import json
from dateutil import parser
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import tornado.web
from status.util import dthandler, SafeHandler
from dateutil import parser
class ProductionCronjobsHandler(SafeHandler):
""" Returns a JSON document with the Cronjobs database information
"""
def get(self):
cronjobs = {}
servers = self.application.cronjobs_db.view('server/alias')
for server in servers.rows:
doc = self.application.cronjobs_db.get(server.value)
cronjobs[server.key] = {"last_updated": datetime.strftime(parser.parse(doc['Last updated']), '%Y-%m-%d %H:%M'),
'users': doc['users'], 'server': server.key}
template = self.application.loader.load("cronjobs.html")
self.write(template.generate(gs_globals=self.application.gs_globals,
cronjobs=cronjobs))
class DeliveredMonthlyDataHandler(SafeHandler):
""" Gives the data for monthly delivered amount of basepairs.
Loaded through /api/v1/delivered_monthly url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
def delivered(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.projects_db.view("date/m_bp_delivered",
group_level=3)
delivered = OrderedDict()
start = [start_date.year,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = row.key[0]
m = row.key[2]
delivered[dthandler(datetime(y, m, 1))] = int(row.value * 1e6)
return delivered
class DeliveredMonthlyPlotHandler(DeliveredMonthlyDataHandler):
""" Gives a bar plot for monthly delivered amount of basepairs.
Loaded through /api/v1/delivered_monthly.png url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
delivered = self.delivered(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in delivered.keys()]
values = delivered.values()
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels([d.strftime("%Y\n%B") for d in dates])
ax.set_title("Basepairs delivered per month")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
delivered = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(delivered))
self.write(delivered)
class DeliveredQuarterlyDataHandler(SafeHandler):
""" Gives the data for quarterly delivered amount of basepairs.
Loaded through /api/v1/delivered_quarterly url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
def delivered(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.projects_db.view("date/m_bp_delivered",
group_level=2)
delivered = OrderedDict()
start = [start_date.year,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = row.key[0]
q = row.key[1]
delivered[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value * 1e6)
return delivered
class DeliveredQuarterlyPlotHandler(DeliveredQuarterlyDataHandler):
""" Gives a bar plot for quarterly delivered amount of basepairs.
Loaded through /api/v1/delivered_quarterly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
delivered = self.delivered(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in delivered.keys()]
values = delivered.values()
ax.bar(dates, values)
ax.set_xticks(dates)
labels = []
for d in dates:
labels.append("{}\nQ{}".format(d.year, (d.month - 1) // 3 + 1))
ax.set_xticklabels(labels)
ax.set_title("Basepairs delivered per quarter")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
delivered = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(delivered))
self.write(delivered)
class ProducedMonthlyDataHandler(SafeHandler):
""" Serves the amount of data produced per month.
Loaded through /api/v1/produced_monthly
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.bpcounts(start_date, end_date), default=dthandler))
def bpcounts(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.samples_db.view("barcodes/date_read_counts",
group_level=3)
produced = OrderedDict()
start = [start_date.year - 2000,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year - 2000,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = int("20" + str(row.key[0]))
m = row.key[2]
produced[dthandler(datetime(y, m, 1))] = row.value
return produced
class ProducedMonthlyPlotHandler(ProducedMonthlyDataHandler):
""" Serves a plot of amount of data produced per month.
Loaded through /api/v1/produced_monthly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
produced = self.bpcounts(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in produced.keys()]
values = produced.values()
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels([d.strftime("%b-%Y") for d in dates], rotation=30)
ax.set_title("Basepairs produced per month")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
produced = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(produced))
self.write(produced)
class ProducedQuarterlyDataHandler(SafeHandler):
""" Gives the data for quarterly produced amount of basepairs.
Loaded through /api/v1/produced_quarterly
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.produced(start_date, end_date), default=dthandler))
def produced(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.samples_db.view("barcodes/date_read_counts",
group_level=2)
produced = OrderedDict()
start = [start_date.year - 2000,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year - 2000,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = int("20" + str(row.key[0]))
q = row.key[1]
produced[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value)
return produced
class ProducedQuarterlyPlotHandler(ProducedQuarterlyDataHandler):
""" Gives a bar plot for quarterly produced amount of basepairs.
Loaded through /api/v1/produced_quarterly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
produced = self.produced(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in produced.keys()]
values = produced.values()
quarters = [(d.month - 1) // 3 + 1 for d in dates]
years = [d.year for d in dates]
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels(["{}\nQ{}".format(*t) for t in zip(years, quarters)])
ax.set_title("Basepairs produced per quarter")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
produced = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(produced))
self.write(produced)
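if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: shows how the
    # [year, quarter, month, day] start/end keys used to slice the CouchDB views
    # above are built from an ordinary datetime.
    example_date = datetime(2013, 5, 17)
    start_key = [example_date.year,
                 (example_date.month - 1) // 3 + 1,
                 example_date.month,
                 example_date.day]
    print(start_key)  # [2013, 2, 5, 17]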
|
mit
|
wastholm/bitcoin
|
qa/rpc-tests/test_framework/blockstore.py
|
95
|
4447
|
# BlockStore: a helper class that keeps a map of blocks and implements
# helper functions for responding to getheaders and getdata,
# and for constructing a getheaders message
#
from mininode import *
import dbm
class BlockStore(object):
def __init__(self, datadir):
self.blockDB = dbm.open(datadir + "/blocks", 'c')
self.currentBlock = 0L
self.headers_map = dict()
def close(self):
self.blockDB.close()
def get(self, blockhash):
serialized_block = None
try:
serialized_block = self.blockDB[repr(blockhash)]
except KeyError:
return None
f = cStringIO.StringIO(serialized_block)
ret = CBlock()
ret.deserialize(f)
ret.calc_sha256()
return ret
def get_header(self, blockhash):
try:
return self.headers_map[blockhash]
except KeyError:
return None
# Note: this pulls full blocks out of the database just to retrieve
# the headers -- perhaps we could keep a separate data structure
# to avoid this overhead.
def headers_for(self, locator, hash_stop, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
current_block_header = self.get_header(current_tip)
if current_block_header is None:
return None
response = msg_headers()
headersList = [ current_block_header ]
maxheaders = 2000
while (headersList[0].sha256 not in locator.vHave):
prevBlockHash = headersList[0].hashPrevBlock
prevBlockHeader = self.get_header(prevBlockHash)
if prevBlockHeader is not None:
headersList.insert(0, prevBlockHeader)
else:
break
headersList = headersList[:maxheaders] # truncate if we have too many
hashList = [x.sha256 for x in headersList]
index = len(headersList)
if (hash_stop in hashList):
index = hashList.index(hash_stop)+1
response.headers = headersList[:index]
return response
def add_block(self, block):
block.calc_sha256()
try:
self.blockDB[repr(block.sha256)] = bytes(block.serialize())
except TypeError as e:
print "Unexpected error: ", sys.exc_info()[0], e.args
self.currentBlock = block.sha256
self.headers_map[block.sha256] = CBlockHeader(block)
def add_header(self, header):
self.headers_map[header.sha256] = header
def get_blocks(self, inv):
responses = []
for i in inv:
if (i.type == 2): # MSG_BLOCK
block = self.get(i.hash)
if block is not None:
responses.append(msg_block(block))
return responses
def get_locator(self, current_tip=None):
if current_tip is None:
current_tip = self.currentBlock
r = []
counter = 0
step = 1
lastBlock = self.get(current_tip)
while lastBlock is not None:
r.append(lastBlock.hashPrevBlock)
for i in range(step):
lastBlock = self.get(lastBlock.hashPrevBlock)
if lastBlock is None:
break
counter += 1
if counter > 10:
step *= 2
locator = CBlockLocator()
locator.vHave = r
return locator
class TxStore(object):
def __init__(self, datadir):
self.txDB = dbm.open(datadir + "/transactions", 'c')
def close(self):
self.txDB.close()
def get(self, txhash):
serialized_tx = None
try:
serialized_tx = self.txDB[repr(txhash)]
except KeyError:
return None
f = cStringIO.StringIO(serialized_tx)
ret = CTransaction()
ret.deserialize(f)
ret.calc_sha256()
return ret
def add_transaction(self, tx):
tx.calc_sha256()
try:
self.txDB[repr(tx.sha256)] = bytes(tx.serialize())
except TypeError as e:
print "Unexpected error: ", sys.exc_info()[0], e.args
def get_transactions(self, inv):
responses = []
for i in inv:
if (i.type == 1): # MSG_TX
tx = self.get(i.hash)
if tx is not None:
responses.append(msg_tx(tx))
return responses
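if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: shows the
    # exponential step-back used by BlockStore.get_locator() above, using plain
    # integer block heights instead of block hashes.
    tip_height = 100  # hypothetical chain tip
    heights = []
    height = tip_height
    counter = 0
    step = 1
    while height > 0:
        heights.append(height)
        height -= step
        counter += 1
        if counter > 10:
            step *= 2
    print(heights)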
|
mit
|
zoyahav/incubator-airflow
|
airflow/utils/dag_processing.py
|
20
|
23293
|
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import re
import time
from abc import ABCMeta, abstractmethod
from collections import defaultdict
from datetime import datetime
from airflow.exceptions import AirflowException
from airflow.dag.base_dag import BaseDag, BaseDagBag
from airflow.utils.logging import LoggingMixin
class SimpleDag(BaseDag):
"""
A simplified representation of a DAG that contains all attributes
required for instantiating and scheduling its associated tasks.
"""
def __init__(self,
dag_id,
task_ids,
full_filepath,
concurrency,
is_paused,
pickle_id):
"""
:param dag_id: ID of the DAG
:type dag_id: unicode
:param task_ids: task IDs associated with the DAG
:type task_ids: list[unicode]
:param full_filepath: path to the file containing the DAG e.g.
/a/b/c.py
:type full_filepath: unicode
:param concurrency: No more than these many tasks from the
dag should run concurrently
:type concurrency: int
:param is_paused: Whether or not this DAG is paused. Tasks from paused
DAGs are not scheduled
:type is_paused: bool
:param pickle_id: ID associated with the pickled version of this DAG.
:type pickle_id: unicode
"""
self._dag_id = dag_id
self._task_ids = task_ids
self._full_filepath = full_filepath
self._is_paused = is_paused
self._concurrency = concurrency
self._pickle_id = pickle_id
@property
def dag_id(self):
"""
:return: the DAG ID
:rtype: unicode
"""
return self._dag_id
@property
def task_ids(self):
"""
:return: A list of task IDs that are in this DAG
:rtype: list[unicode]
"""
return self._task_ids
@property
def full_filepath(self):
"""
:return: The absolute path to the file that contains this DAG's definition
:rtype: unicode
"""
return self._full_filepath
@property
def concurrency(self):
"""
:return: maximum number of tasks that can run simultaneously from this DAG
:rtype: int
"""
return self._concurrency
@property
def is_paused(self):
"""
:return: whether this DAG is paused or not
:rtype: bool
"""
return self._is_paused
@property
def pickle_id(self):
"""
:return: The pickle ID for this DAG, if it has one. Otherwise None.
:rtype: unicode
"""
return self._pickle_id
class SimpleDagBag(BaseDagBag):
"""
A collection of SimpleDag objects with some convenience methods.
"""
def __init__(self, simple_dags):
"""
Constructor.
        :param simple_dags: SimpleDag objects that should be in this bag
:type: list(SimpleDag)
"""
self.simple_dags = simple_dags
self.dag_id_to_simple_dag = {}
for simple_dag in simple_dags:
self.dag_id_to_simple_dag[simple_dag.dag_id] = simple_dag
@property
def dag_ids(self):
"""
        :return: IDs of all the DAGs in this bag
:rtype: list[unicode]
"""
return self.dag_id_to_simple_dag.keys()
def get_dag(self, dag_id):
"""
:param dag_id: DAG ID
:type dag_id: unicode
:return: if the given DAG ID exists in the bag, return the BaseDag
corresponding to that ID. Otherwise, throw an Exception
:rtype: SimpleDag
"""
if dag_id not in self.dag_id_to_simple_dag:
raise AirflowException("Unknown DAG ID {}".format(dag_id))
return self.dag_id_to_simple_dag[dag_id]
def list_py_file_paths(directory, safe_mode=True):
"""
Traverse a directory and look for Python files.
:param directory: the directory to traverse
:type directory: unicode
:param safe_mode: whether to use a heuristic to determine whether a file
contains Airflow DAG definitions
:return: a list of paths to Python files in the specified directory
:rtype: list[unicode]
"""
file_paths = []
if directory is None:
return []
elif os.path.isfile(directory):
return [directory]
elif os.path.isdir(directory):
patterns = []
for root, dirs, files in os.walk(directory, followlinks=True):
ignore_file = [f for f in files if f == '.airflowignore']
if ignore_file:
f = open(os.path.join(root, ignore_file[0]), 'r')
patterns += [p for p in f.read().split('\n') if p]
f.close()
for f in files:
try:
file_path = os.path.join(root, f)
if not os.path.isfile(file_path):
continue
mod_name, file_ext = os.path.splitext(
os.path.split(file_path)[-1])
if file_ext != '.py':
continue
if any([re.findall(p, file_path) for p in patterns]):
continue
# Heuristic that guesses whether a Python file contains an
# Airflow DAG definition.
might_contain_dag = True
if safe_mode:
with open(file_path, 'rb') as f:
content = f.read()
might_contain_dag = all(
[s in content for s in (b'DAG', b'airflow')])
if not might_contain_dag:
continue
file_paths.append(file_path)
except Exception:
logging.exception("Error while examining %s", f)
return file_paths
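if __name__ == '__main__':
    # Illustrative sketch only, not part of the original source: demonstrates the
    # safe-mode heuristic used by list_py_file_paths() above, which only treats a
    # file as a candidate DAG definition if it mentions both b'DAG' and b'airflow'.
    sample_content = b"from airflow import DAG\n"
    might_contain_dag = all([s in sample_content for s in (b'DAG', b'airflow')])
    print(might_contain_dag)  # True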
class AbstractDagFileProcessor(object):
"""
Processes a DAG file. See SchedulerJob.process_file() for more details.
"""
__metaclass__ = ABCMeta
@abstractmethod
def start(self):
"""
Launch the process to process the file
"""
raise NotImplementedError()
@abstractmethod
def terminate(self, sigkill=False):
"""
Terminate (and then kill) the process launched to process the file
"""
raise NotImplementedError()
@property
@abstractmethod
def pid(self):
"""
:return: the PID of the process launched to process the given file
"""
raise NotImplementedError()
@property
@abstractmethod
def exit_code(self):
"""
After the process is finished, this can be called to get the return code
:return: the exit code of the process
:rtype: int
"""
raise NotImplementedError()
@property
@abstractmethod
def done(self):
"""
Check if the process launched to process this file is done.
:return: whether the process is finished running
:rtype: bool
"""
raise NotImplementedError()
@property
@abstractmethod
def result(self):
"""
:return: result of running SchedulerJob.process_file()
:rtype: list[SimpleDag]
"""
raise NotImplementedError()
@property
@abstractmethod
def start_time(self):
"""
:return: When this started to process the file
:rtype: datetime
"""
raise NotImplementedError()
@property
@abstractmethod
def log_file(self):
"""
:return: the log file associated with this processor
:rtype: unicode
"""
raise NotImplementedError()
@property
@abstractmethod
def file_path(self):
"""
:return: the path to the file that this is processing
:rtype: unicode
"""
raise NotImplementedError()
class DagFileProcessorManager(LoggingMixin):
"""
Given a list of DAG definition files, this kicks off several processors
in parallel to process them. The parallelism is limited and as the
processors finish, more are launched. The files are processed over and
over again, but no more often than the specified interval.
:type _file_path_queue: list[unicode]
:type _processors: dict[unicode, AbstractDagFileProcessor]
:type _last_runtime: dict[unicode, float]
:type _last_finish_time: dict[unicode, datetime]
"""
def __init__(self,
dag_directory,
file_paths,
parallelism,
process_file_interval,
child_process_log_directory,
max_runs,
processor_factory):
"""
:param dag_directory: Directory where DAG definitions are kept. All
files in file_paths should be under this directory
:type dag_directory: unicode
:param file_paths: list of file paths that contain DAG definitions
:type file_paths: list[unicode]
        :param parallelism: maximum number of processes to run simultaneously
:type parallelism: int
:param process_file_interval: process a file at most once every this
many seconds
:type process_file_interval: float
:param max_runs: The number of times to parse and schedule each file. -1
for unlimited.
:type max_runs: int
:param child_process_log_directory: Store logs for child processes in
this directory
:type child_process_log_directory: unicode
:param processor_factory: function that creates processors for DAG
definition files. Arguments are (dag_definition_path, log_file_path)
:type processor_factory: (unicode, unicode) -> (AbstractDagFileProcessor)
"""
self._file_paths = file_paths
self._file_path_queue = []
self._parallelism = parallelism
self._dag_directory = dag_directory
self._max_runs = max_runs
self._process_file_interval = process_file_interval
self._child_process_log_directory = child_process_log_directory
self._processor_factory = processor_factory
# Map from file path to the processor
self._processors = {}
# Map from file path to the last runtime
self._last_runtime = {}
# Map from file path to the last finish time
self._last_finish_time = {}
# Map from file path to the number of runs
self._run_count = defaultdict(int)
# Scheduler heartbeat key.
self._heart_beat_key = 'heart-beat'
@property
def file_paths(self):
return self._file_paths
def get_pid(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the PID of the process processing the given file or None if
the specified file is not being processed
:rtype: int
"""
if file_path in self._processors:
return self._processors[file_path].pid
return None
def get_all_pids(self):
"""
:return: a list of the PIDs for the processors that are running
:rtype: List[int]
"""
return [x.pid for x in self._processors.values()]
def get_runtime(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the current runtime (in seconds) of the process that's
processing the specified file or None if the file is not currently
being processed
"""
if file_path in self._processors:
return (datetime.now() - self._processors[file_path].start_time)\
.total_seconds()
return None
def get_last_runtime(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the runtime (in seconds) of the process of the last run, or
None if the file was never processed.
:rtype: float
"""
return self._last_runtime.get(file_path)
def get_last_finish_time(self, file_path):
"""
:param file_path: the path to the file that was processed
:type file_path: unicode
:return: the finish time of the process of the last run, or None if the
file was never processed.
:rtype: datetime
"""
return self._last_finish_time.get(file_path)
def get_start_time(self, file_path):
"""
:param file_path: the path to the file that's being processed
:type file_path: unicode
:return: the start time of the process that's processing the
specified file or None if the file is not currently being processed
:rtype: datetime
"""
if file_path in self._processors:
return self._processors[file_path].start_time
return None
def set_file_paths(self, new_file_paths):
"""
Update this with a new set of paths to DAG definition files.
:param new_file_paths: list of paths to DAG definition files
:type new_file_paths: list[unicode]
:return: None
"""
self._file_paths = new_file_paths
self._file_path_queue = [x for x in self._file_path_queue
if x in new_file_paths]
# Stop processors that are working on deleted files
filtered_processors = {}
for file_path, processor in self._processors.items():
if file_path in new_file_paths:
filtered_processors[file_path] = processor
else:
self.logger.warning("Stopping processor for {}".format(file_path))
                processor.terminate()
self._processors = filtered_processors
@staticmethod
def _split_path(file_path):
"""
Return the path elements of a path as an array. E.g. /a/b/c ->
['a', 'b', 'c']
:param file_path: the file path to split
:return: a list of the elements of the file path
:rtype: list[unicode]
"""
results = []
while True:
head, tail = os.path.split(file_path)
if len(tail) != 0:
results.append(tail)
if file_path == head:
break
file_path = head
results.reverse()
return results
def _get_log_directory(self):
"""
Log output from processing DAGs for the current day should go into
this directory.
:return: the path to the corresponding log directory
:rtype: unicode
"""
now = datetime.now()
return os.path.join(self._child_process_log_directory,
now.strftime("%Y-%m-%d"))
def _get_log_file_path(self, dag_file_path):
"""
Log output from processing the specified file should go to this
location.
:param dag_file_path: file containing a DAG
:type dag_file_path: unicode
:return: the path to the corresponding log file
:rtype: unicode
"""
log_directory = self._get_log_directory()
# General approach is to put the log file under the same relative path
# under the log directory as the DAG file in the DAG directory
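        # e.g. with a DAG directory of '/dags' and a DAG file at
        # '/dags/team_a/etl.py', the log ends up at
        # <log_dir>/<YYYY-MM-DD>/team_a/etl.py.log (paths shown are illustrative)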
relative_dag_file_path = os.path.relpath(dag_file_path, start=self._dag_directory)
path_elements = self._split_path(relative_dag_file_path)
# Add a .log suffix for the log file
path_elements[-1] += ".log"
return os.path.join(log_directory, *path_elements)
def symlink_latest_log_directory(self):
"""
Create symbolic link to the current day's log directory to
allow easy access to the latest scheduler log files.
:return: None
"""
log_directory = self._get_log_directory()
latest_log_directory_path = os.path.join(
self._child_process_log_directory, "latest")
if (os.path.isdir(log_directory)):
# if symlink exists but is stale, update it
if (os.path.islink(latest_log_directory_path)):
if(os.readlink(latest_log_directory_path) != log_directory):
os.unlink(latest_log_directory_path)
os.symlink(log_directory, latest_log_directory_path)
elif (os.path.isdir(latest_log_directory_path) or
os.path.isfile(latest_log_directory_path)):
self.logger.warning("{} already exists as a dir/file. "
"Skip creating symlink."
.format(latest_log_directory_path))
else:
os.symlink(log_directory, latest_log_directory_path)
def processing_count(self):
"""
:return: the number of files currently being processed
:rtype: int
"""
return len(self._processors)
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1)
def heartbeat(self):
"""
This should be periodically called by the scheduler. This method will
        kick off new processes to process DAG definition files and read the
results from the finished processors.
:return: a list of SimpleDags that were produced by processors that
have finished since the last time this was called
:rtype: list[SimpleDag]
"""
finished_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
running_processors = {}
""":type : dict[unicode, AbstractDagFileProcessor]"""
for file_path, processor in self._processors.items():
if processor.done:
self.logger.info("Processor for {} finished".format(file_path))
now = datetime.now()
finished_processors[file_path] = processor
self._last_runtime[file_path] = (now -
processor.start_time).total_seconds()
self._last_finish_time[file_path] = now
self._run_count[file_path] += 1
else:
running_processors[file_path] = processor
self._processors = running_processors
# Collect all the DAGs that were found in the processed files
simple_dags = []
for file_path, processor in finished_processors.items():
if processor.result is None:
self.logger.warning("Processor for {} exited with return code "
"{}. See {} for details."
.format(processor.file_path,
processor.exit_code,
processor.log_file))
else:
for simple_dag in processor.result:
simple_dags.append(simple_dag)
# Generate more file paths to process if we processed all the files
# already.
if len(self._file_path_queue) == 0:
# If the file path is already being processed, or if a file was
# processed recently, wait until the next batch
file_paths_in_progress = self._processors.keys()
now = datetime.now()
file_paths_recently_processed = []
for file_path in self._file_paths:
last_finish_time = self.get_last_finish_time(file_path)
if (last_finish_time is not None and
(now - last_finish_time).total_seconds() <
self._process_file_interval):
file_paths_recently_processed.append(file_path)
files_paths_at_run_limit = [file_path
for file_path, num_runs in self._run_count.items()
if num_runs == self._max_runs]
files_paths_to_queue = list(set(self._file_paths) -
set(file_paths_in_progress) -
set(file_paths_recently_processed) -
set(files_paths_at_run_limit))
for file_path, processor in self._processors.items():
self.logger.debug("File path {} is still being processed (started: {})"
.format(processor.file_path,
processor.start_time.isoformat()))
self.logger.debug("Queuing the following files for processing:\n\t{}"
.format("\n\t".join(files_paths_to_queue)))
self._file_path_queue.extend(files_paths_to_queue)
# Start more processors if we have enough slots and files to process
while (self._parallelism - len(self._processors) > 0 and
len(self._file_path_queue) > 0):
file_path = self._file_path_queue.pop(0)
log_file_path = self._get_log_file_path(file_path)
processor = self._processor_factory(file_path, log_file_path)
processor.start()
self.logger.info("Started a process (PID: {}) to generate "
"tasks for {} - logging into {}"
.format(processor.pid, file_path, log_file_path))
self._processors[file_path] = processor
self.symlink_latest_log_directory()
# Update scheduler heartbeat count.
self._run_count[self._heart_beat_key] += 1
return simple_dags
def max_runs_reached(self):
"""
:return: whether all file paths have been processed max_runs times
"""
if self._max_runs == -1: # Unlimited runs.
return False
for file_path in self._file_paths:
if self._run_count[file_path] != self._max_runs:
return False
if self._run_count[self._heart_beat_key] < self._max_runs:
return False
return True
def terminate(self):
"""
Stops all running processors
:return: None
"""
for processor in self._processors.values():
processor.terminate()
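# Illustrative usage sketch only, NOT part of Airflow: one way a caller could
# drive the manager above until every file has been parsed `max_runs` times.
# The construction shown in the comment reuses the ExampleDagFileProcessor
# sketch defined earlier; the paths, interval and `parse_dag_file` callable
# are made-up placeholders.
#
# manager = DagFileProcessorManager(
#     dag_directory='/path/to/dags', file_paths=paths, parallelism=2,
#     process_file_interval=30.0,
#     child_process_log_directory='/tmp/scheduler_logs', max_runs=1,
#     processor_factory=lambda path, log: ExampleDagFileProcessor(
#         path, log, parse_dag_file))
def _example_drive_manager(manager):
    """Collect SimpleDags from repeated heartbeats until max runs are hit."""
    simple_dags = []
    while not manager.max_runs_reached():
        simple_dags.extend(manager.heartbeat())
        time.sleep(1)
    manager.wait_until_finished()
    manager.terminate()
    return simple_dags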
|
apache-2.0
|
jamesdavidson/corpkit
|
orientation/corpus_building.py
|
3
|
6471
|
# <headingcell level=1>
# *corpkit*: a Python-based toolkit for working with parsed linguistic corpora
# <headingcell level=2>
# <markdowncell>
# **[Daniel McDonald](mailto:[email protected]?Subject=corpkit)**
#---------------------------
# <markdowncell>
# <br>
# > **SUMMARY:** This *IPython Notebook* shows you how to use `corpkit` to build a parsed corpus.
# Sophisticated corpora can't be built without a little bit of coding. The functions here aim to simplify the process as much as possible, however.
# <codecell>
import corpkit
from corpkit import get_urls, downloader, simple_text_extractor, correct_spelling, stanford_parse
# <headingcell level=2>
# Getting raw data
# <markdowncell>
# The function below will get all URLs listed on a webpage. You can pass a `criteria` regex, together with the `remove` argument, to keep or discard matching URLs.
# <codecell>
%load_ext soup
urls = get_urls('http://www.theage.com.au', r'theage.*political-news', remove = False)
print urls
# <markdowncell>
# You could, of course, do this recursively. It would probably be wise to use `criteria` to restrict the domain:
# <codecell>
bigger_list = []
for index, url in enumerate(urls):
bigger_list.append(url)
print 'Doing %d/%d...' % (index + 1, len(urls))
more_urls = get_urls(url, r'www\.theage\.com\.au', remove = False)
for u in more_urls:
bigger_list.append(u)
# we only want unique entries:
unique_urls = sorted(set(bigger_list))
print '%d unique URLs!' % len(unique_urls)
# <headingcell level=2>
# Downloading files
# <markdowncell>
# So long as hard disk space isn't an issue, it's better to download the HTML, as the page could vanish one day. Also, one of `corpkit`'s cool features is connecting HTML to concordance lines, which at present works with downloaded HTML only.
# It's bad form to download a lot of URLs without a little bit of wait time. Here, it's set to five seconds, which is usually polite enough.
# <codecell>
# we won't use our bigger list...
downloader(urls, new_path = 'html', wait = 5)
# <markdowncell>
# When it's done, there will be an 'html' folder in the current directory.
# <headingcell level=2>
# Making a corpus from the HTML
# <markdowncell>
# Now that we have the HTML files, we can build a corpus.
# To take advantage of all of `corpkit`'s features, we will build a corpus where each file contains:
# 1. The raw text we're interested in
# 2. The spelling corrected version
# 3. The annotated versions of the spelling corrected version
# 4. The original HTML, so that we can reconstitute the text's original context.
# The other thing we'll do is structure the corpus by a metadata attribute. Rather than having a huge set of text files, we'll have a corpus with subcorpora (a folder with subfolders). `corpkit` will then be able to run over each subcorpus with the same query and plot results.
# So, our workflow will be:
# 1. Get the raw text of interest from the HTML and the metadata attribute we'll structure our corpus by
# 2. Normalise the spelling of the texts, to aid parsing
# 3. Parse the spelling corrected version with CoreNLP
# 4. Make subcorpora, and put each text, annotations and html in the right spot
# To make a well-structured corpus, some steps will require some user input, which may in turn mean that you need to write some Python from scratch. Below is a basic guide.
# <headingcell level=3>
# Get raw text and metadata
# <markdowncell>
# This is the most difficult part of the entire process, and it can't be totally automated, since what kind of text you want, and what metadata feature you want to use to structure the corpus, could be almost anything.
# We're going to use Beautiful Soup, an HTML parser, to get what we want from our text. You may well need to use [its documentation](http://www.crummy.com/software/BeautifulSoup/bs4/doc/) to figure out how to extract the right things from your HTML.
# `corpkit`'s functions use [ipython-beautifulsoup](https://github.com/Psycojoker/ipython-beautifulsoup), which very helpfully displays the raw and rendered HTML together.
# What you need to do is define two functions, `get_text` and `get_metadata`:
# <codecell>
def get_text():
return text
# <codecell>
def get_metadata():
return metadata
# <codecell>
# attempt
# <markdowncell>
# What you want to end up with is a *tuple* with two parts: the text, and the metadata.
# <codecell>
# real
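# <markdowncell>
# As a rough sketch only: a possible pair of functions for a hypothetical article page whose body text sits in `<p>` tags inside a `div` with class `article-body`, and whose publication date sits in a `<time>` element. The class name, the selectors and the year-as-metadata choice are all assumptions; inspect your own HTML and adjust.
# <codecell>
from bs4 import BeautifulSoup

def get_text(html):
    soup = BeautifulSoup(html)
    body = soup.find('div', class_='article-body')
    paragraphs = body.find_all('p') if body else []
    return '\n'.join(p.get_text() for p in paragraphs)

def get_metadata(html):
    soup = BeautifulSoup(html)
    date = soup.find('time')
    # use the year of publication as the metadata attribute
    return date.get_text().strip()[-4:] if date else 'unknown'

# with open('html/some_article.html') as f:
#     html = f.read()
# data = (get_text(html), get_metadata(html))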
# <headingcell level=4>
# The simpler approach
# <markdowncell>
# If you're in a hurry to get text from the HTML, and aren't too fussed about exactly what part of the page it comes from, you can use `simple_text_extractor()`, which relies on `justext`:
# <codecell>
simple_text_extractor()
# <headingcell level=3>
# Normalise spelling
# <codecell>
correct_spelling()
# <headingcell level=3>
# Parse normalised texts
# <headingcell level=3>
# Make subcorpora
# <markdowncell>
# There are two major approaches. One is to use a heuristic to get anything in the HTML that looks like natural language. This option is most useful for people who are either very new to programming, or seeking to make larger, more general corpora. The second option is to traverse the HTML, and pull out the exact things we want. This can't be fully automated, because it's very different for everyone.
# <headingcell level=3>
# via JusText
# <headingcell level=3>
# via Beautiful Soup
# <markdowncell>
# This method is more complicated, but is superior. If we want to make a corpus with structure, we want to attach metadata to every extracted text, so that we can then structure according to the metadata attribute.
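# <markdowncell>
# A minimal sketch, assuming the `get_text()` and `get_metadata()` functions defined earlier: walk the downloaded HTML and collect `(text, metadata)` tuples, skipping pages where nothing useful could be extracted. The `html` folder is the one created by `downloader()` above.
# <codecell>
import os

extracted = []
for filename in os.listdir('html'):
    with open(os.path.join('html', filename)) as f:
        html = f.read()
    text = get_text(html)
    if text:
        extracted.append((text, get_metadata(html)))
print '%d texts extracted' % len(extracted)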
# <headingcell level=2>
# Structuring the data
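# <markdowncell>
# Again, only a sketch: write each extracted text into a subfolder named after its metadata value, so that the corpus ends up as one folder per subcorpus. The numbering scheme is illustrative; the `data/structured_corpus` path is the one used again below when parsing.
# <codecell>
import os

corpus_path = 'data/structured_corpus'
for index, (text, metadata) in enumerate(extracted):
    subcorpus = os.path.join(corpus_path, metadata)
    if not os.path.isdir(subcorpus):
        os.makedirs(subcorpus)
    with open(os.path.join(subcorpus, '%04d.txt' % index), 'w') as fo:
        fo.write(text.encode('utf-8'))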
# <headingcell level=2>
# Parsing the data
# <markdowncell>
# We need to install CoreNLP and a Python wrapper for it:
# <codecell>
# sudo pip install pexpect unidecode
# get the wrapper
! git clone git://github.com/arne-cl/stanford-corenlp-python.git
# go to wrapper dir
! cd stanford-corenlp-python
# get stanford corenlp
! wget http://nlp.stanford.edu/software/stanford-corenlp-full-2014-08-27.zip
# unzip it
! unzip stanford-corenlp-full-2014-08-27.zip
# install the wrapper, which installs corenlp too
! sudo python setup.py install
# go back to original dir and delete everything we no longer need
! cd ..
! rm -f -R stanford-corenlp-python
# <codecell>
corpus_path = 'data/structured_corpus'
stanford_parse(corpus_path)
# <markdowncell>
# Assuming everything went smoothly, you should now be able to interrogate the corpus. For documentation on this subject, head over to `orientation.ipynb`.
|
mit
|
sorenk/ansible
|
lib/ansible/modules/network/vyos/vyos_user.py
|
38
|
10849
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_user
version_added: "2.4"
author: "Trishna Guha (@trishnaguha)"
short_description: Manage the collection of local users on VyOS device
description:
- This module provides declarative management of the local usernames
configured on network devices. It allows playbooks to manage
either individual usernames or the collection of usernames in the
current running config. It also supports purging usernames from the
configuration that are not explicitly defined.
notes:
- Tested against VYOS 1.1.7
options:
aggregate:
description:
- The set of username objects to be configured on the remote
VyOS device. The list entries can either be the username or
a hash of username and properties. This argument is mutually
exclusive with the C(name) argument.
aliases: ['users', 'collection']
name:
description:
- The username to be configured on the VyOS device.
This argument accepts a string value and is mutually exclusive
with the C(aggregate) argument.
        Please note that this option is not the same as C(provider username).
full_name:
description:
- The C(full_name) argument provides the full name of the user
account to be created on the remote device. This argument accepts
any text string value.
configured_password:
description:
- The password to be configured on the VyOS device. The
password needs to be provided in clear and it will be encrypted
on the device.
        Please note that this option is not the same as C(provider password).
update_password:
description:
- Since passwords are encrypted in the device running config, this
argument will instruct the module when to change the password. When
set to C(always), the password will always be updated in the device
and when set to C(on_create) the password will be updated only if
the username is created.
default: always
choices: ['on_create', 'always']
level:
description:
- The C(level) argument configures the level of the user when logged
into the system. This argument accepts string values admin or operator.
aliases: ['role']
purge:
description:
- Instructs the module to consider the
resource definition absolute. It will remove any previously
configured usernames on the device with the exception of the
`admin` user (the current defined set of users).
type: bool
default: false
state:
description:
- Configures the state of the username definition
as it relates to the device operational configuration. When set
to I(present), the username(s) should be configured in the device active
configuration and when set to I(absent) the username(s) should not be
in the device active configuration
default: present
choices: ['present', 'absent']
extends_documentation_fragment: vyos
"""
EXAMPLES = """
- name: create a new user
vyos_user:
name: ansible
configured_password: password
state: present
- name: remove all users except admin
vyos_user:
purge: yes
- name: set multiple users to level operator
vyos_user:
aggregate:
- name: netop
- name: netend
level: operator
state: present
- name: Change Password for User netop
vyos_user:
name: netop
configured_password: "{{ new_password }}"
update_password: always
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system login user test level operator
- set system login user authentication plaintext-password password
"""
import re
from copy import deepcopy
from functools import partial
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.utils import remove_default_spec
from ansible.module_utils.network.vyos.vyos import get_config, load_config
from ansible.module_utils.six import iteritems
from ansible.module_utils.network.vyos.vyos import vyos_argument_spec
def validate_level(value, module):
if value not in ('admin', 'operator'):
module.fail_json(msg='level must be either admin or operator, got %s' % value)
def spec_to_commands(updates, module):
commands = list()
state = module.params['state']
update_password = module.params['update_password']
def needs_update(want, have, x):
return want.get(x) and (want.get(x) != have.get(x))
def add(command, want, x):
command.append('set system login user %s %s' % (want['name'], x))
for update in updates:
want, have = update
if want['state'] == 'absent':
commands.append('delete system login user %s' % want['name'])
continue
if needs_update(want, have, 'level'):
add(commands, want, "level %s" % want['level'])
if needs_update(want, have, 'full_name'):
add(commands, want, "full-name %s" % want['full_name'])
if needs_update(want, have, 'configured_password'):
if update_password == 'always' or not have:
add(commands, want, 'authentication plaintext-password %s' % want['configured_password'])
return commands
def parse_level(data):
match = re.search(r'level (\S+)', data, re.M)
if match:
level = match.group(1)[1:-1]
return level
def parse_full_name(data):
match = re.search(r'full-name (\S+)', data, re.M)
if match:
full_name = match.group(1)[1:-1]
return full_name
def config_to_dict(module):
data = get_config(module)
match = re.findall(r'^set system login user (\S+)', data, re.M)
if not match:
return list()
instances = list()
for user in set(match):
regex = r' %s .+$' % user
cfg = re.findall(regex, data, re.M)
cfg = '\n'.join(cfg)
obj = {
'name': user,
'state': 'present',
'configured_password': None,
'level': parse_level(cfg),
'full_name': parse_full_name(cfg)
}
instances.append(obj)
return instances
def get_param_value(key, item, module):
# if key doesn't exist in the item, get it from module.params
if not item.get(key):
value = module.params[key]
# validate the param value (if validator func exists)
validator = globals().get('validate_%s' % key)
if all((value, validator)):
validator(value, module)
return value
def map_params_to_obj(module):
aggregate = module.params['aggregate']
if not aggregate:
if not module.params['name'] and module.params['purge']:
return list()
else:
users = [{'name': module.params['name']}]
else:
users = list()
for item in aggregate:
if not isinstance(item, dict):
users.append({'name': item})
else:
users.append(item)
objects = list()
for item in users:
get_value = partial(get_param_value, item=item, module=module)
item['configured_password'] = get_value('configured_password')
item['full_name'] = get_value('full_name')
item['level'] = get_value('level')
item['state'] = get_value('state')
objects.append(item)
return objects
def update_objects(want, have):
updates = list()
for entry in want:
item = next((i for i in have if i['name'] == entry['name']), None)
if item is None:
updates.append((entry, {}))
elif item:
for key, value in iteritems(entry):
if value and value != item[key]:
updates.append((entry, item))
return updates
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
full_name=dict(),
level=dict(aliases=['role']),
configured_password=dict(no_log=True),
update_password=dict(default='always', choices=['on_create', 'always']),
state=dict(default='present', choices=['present', 'absent'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec, aliases=['users', 'collection']),
purge=dict(type='bool', default=False)
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
mutually_exclusive = [('name', 'aggregate')]
module = AnsibleModule(argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
if module.params['password'] and not module.params['configured_password']:
warnings.append(
'The "password" argument is used to authenticate the current connection. ' +
'To set a user password use "configured_password" instead.'
)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(update_objects(want, have), module)
if module.params['purge']:
want_users = [x['name'] for x in want]
have_users = [x['name'] for x in have]
for item in set(have_users).difference(want_users):
commands.append('delete system login user %s' % item)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
gpl-3.0
|
SimonGriffioen/pascal
|
PASCAL/external/networkx/algorithms/operators/tests/test_all.py
|
40
|
5320
|
from nose.tools import *
import networkx as nx
from networkx.testing import *
def test_union_all_attributes():
g = nx.Graph()
g.add_node(0, x=4)
g.add_node(1, x=5)
g.add_edge(0, 1, size=5)
g.graph['name'] = 'g'
h = g.copy()
h.graph['name'] = 'h'
h.graph['attr'] = 'attr'
h.node[0]['x'] = 7
j = g.copy()
j.graph['name'] = 'j'
j.graph['attr'] = 'attr'
j.node[0]['x'] = 7
ghj = nx.union_all([g, h, j], rename=('g', 'h', 'j'))
assert_equal( set(ghj.nodes()) , set(['h0', 'h1', 'g0', 'g1', 'j0', 'j1']) )
for n in ghj:
graph, node = n
assert_equal( ghj.node[n], eval(graph).node[int(node)] )
assert_equal(ghj.graph['attr'],'attr')
    assert_equal(ghj.graph['name'],'j') # j graph attributes take precedence
def test_intersection_all():
G=nx.Graph()
H=nx.Graph()
R=nx.Graph()
G.add_nodes_from([1,2,3,4])
G.add_edge(1,2)
G.add_edge(2,3)
H.add_nodes_from([1,2,3,4])
H.add_edge(2,3)
H.add_edge(3,4)
R.add_nodes_from([1,2,3,4])
R.add_edge(2,3)
R.add_edge(4,1)
I=nx.intersection_all([G,H,R])
assert_equal( set(I.nodes()) , set([1,2,3,4]) )
assert_equal( sorted(I.edges()) , [(2,3)] )
def test_intersection_all_attributes():
g = nx.Graph()
g.add_node(0, x=4)
g.add_node(1, x=5)
g.add_edge(0, 1, size=5)
g.graph['name'] = 'g'
h = g.copy()
h.graph['name'] = 'h'
h.graph['attr'] = 'attr'
h.node[0]['x'] = 7
gh = nx.intersection_all([g, h])
assert_equal( set(gh.nodes()) , set(g.nodes()) )
assert_equal( set(gh.nodes()) , set(h.nodes()) )
assert_equal( sorted(gh.edges()) , sorted(g.edges()) )
h.remove_node(0)
assert_raises(nx.NetworkXError, nx.intersection, g, h)
def test_intersection_all_multigraph_attributes():
g = nx.MultiGraph()
g.add_edge(0, 1, key=0)
g.add_edge(0, 1, key=1)
g.add_edge(0, 1, key=2)
h = nx.MultiGraph()
h.add_edge(0, 1, key=0)
h.add_edge(0, 1, key=3)
gh = nx.intersection_all([g, h])
assert_equal( set(gh.nodes()) , set(g.nodes()) )
assert_equal( set(gh.nodes()) , set(h.nodes()) )
assert_equal( sorted(gh.edges()) , [(0,1)] )
assert_equal( sorted(gh.edges(keys=True)) , [(0,1,0)] )
def test_union_all_and_compose_all():
K3=nx.complete_graph(3)
P3=nx.path_graph(3)
G1=nx.DiGraph()
G1.add_edge('A','B')
G1.add_edge('A','C')
G1.add_edge('A','D')
G2=nx.DiGraph()
G2.add_edge('1','2')
G2.add_edge('1','3')
G2.add_edge('1','4')
G=nx.union_all([G1,G2])
H=nx.compose_all([G1,G2])
assert_edges_equal(G.edges(),H.edges())
assert_false(G.has_edge('A','1'))
assert_raises(nx.NetworkXError, nx.union, K3, P3)
H1=nx.union_all([H,G1],rename=('H','G1'))
assert_equal(sorted(H1.nodes()),
['G1A', 'G1B', 'G1C', 'G1D',
'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
H2=nx.union_all([H,G2],rename=("H",""))
assert_equal(sorted(H2.nodes()),
['1', '2', '3', '4',
'H1', 'H2', 'H3', 'H4', 'HA', 'HB', 'HC', 'HD'])
assert_false(H1.has_edge('NB','NA'))
G=nx.compose_all([G,G])
assert_edges_equal(G.edges(),H.edges())
G2=nx.union_all([G2,G2],rename=('','copy'))
assert_equal(sorted(G2.nodes()),
['1', '2', '3', '4', 'copy1', 'copy2', 'copy3', 'copy4'])
assert_equal(G2.neighbors('copy4'),[])
assert_equal(sorted(G2.neighbors('copy1')),['copy2', 'copy3', 'copy4'])
assert_equal(len(G),8)
assert_equal(nx.number_of_edges(G),6)
E=nx.disjoint_union_all([G,G])
assert_equal(len(E),16)
assert_equal(nx.number_of_edges(E),12)
E=nx.disjoint_union_all([G1,G2])
assert_equal(sorted(E.nodes()),[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11])
G1=nx.DiGraph()
G1.add_edge('A','B')
G2=nx.DiGraph()
G2.add_edge(1,2)
G3=nx.DiGraph()
G3.add_edge(11,22)
G4=nx.union_all([G1,G2,G3],rename=("G1","G2","G3"))
assert_equal(sorted(G4.nodes()),
['G1A', 'G1B', 'G21', 'G22',
'G311', 'G322'])
def test_union_all_multigraph():
G=nx.MultiGraph()
G.add_edge(1,2,key=0)
G.add_edge(1,2,key=1)
H=nx.MultiGraph()
H.add_edge(3,4,key=0)
H.add_edge(3,4,key=1)
GH=nx.union_all([G,H])
assert_equal( set(GH) , set(G)|set(H))
assert_equal( set(GH.edges(keys=True)) ,
set(G.edges(keys=True))|set(H.edges(keys=True)))
def test_input_output():
l = [nx.Graph([(1,2)]),nx.Graph([(3,4)])]
U = nx.disjoint_union_all(l)
assert_equal(len(l),2)
C = nx.compose_all(l)
assert_equal(len(l),2)
l = [nx.Graph([(1,2)]),nx.Graph([(1,2)])]
R = nx.intersection_all(l)
assert_equal(len(l),2)
@raises(nx.NetworkXError)
def test_mixed_type_union():
G = nx.Graph()
H = nx.MultiGraph()
I = nx.Graph()
U = nx.union_all([G,H,I])
@raises(nx.NetworkXError)
def test_mixed_type_disjoint_union():
G = nx.Graph()
H = nx.MultiGraph()
I = nx.Graph()
U = nx.disjoint_union_all([G,H,I])
@raises(nx.NetworkXError)
def test_mixed_type_intersection():
G = nx.Graph()
H = nx.MultiGraph()
I = nx.Graph()
U = nx.intersection_all([G,H,I])
@raises(nx.NetworkXError)
def test_mixed_type_compose():
G = nx.Graph()
H = nx.MultiGraph()
I = nx.Graph()
U = nx.compose_all([G,H,I])
|
gpl-2.0
|
jbzdak/edx-platform
|
lms/djangoapps/teams/tests/test_views.py
|
3
|
59381
|
# -*- coding: utf-8 -*-
"""Tests for the teams API at the HTTP request level."""
import json
from datetime import datetime
import pytz
from dateutil import parser
import ddt
from elasticsearch.exceptions import ConnectionError
from mock import patch
from search.search_engine_base import SearchEngine
from django.core.urlresolvers import reverse
from django.conf import settings
from django.db.models.signals import post_save
from django.utils import translation
from nose.plugins.attrib import attr
from rest_framework.test import APITestCase, APIClient
from xmodule.modulestore.tests.django_utils import SharedModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from courseware.tests.factories import StaffFactory
from common.test.utils import skip_signal
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from student.models import CourseEnrollment
from util.testing import EventTestMixin
from .factories import CourseTeamFactory, LAST_ACTIVITY_AT
from ..models import CourseTeamMembership
from ..search_indexes import CourseTeamIndexer, CourseTeam, course_team_post_save_callback
from django_comment_common.models import Role, FORUM_ROLE_COMMUNITY_TA
from django_comment_common.utils import seed_permissions_roles
@attr('shard_1')
class TestDashboard(SharedModuleStoreTestCase):
"""Tests for the Teams dashboard."""
test_password = "test"
NUM_TOPICS = 10
@classmethod
def setUpClass(cls):
super(TestDashboard, cls).setUpClass()
cls.course = CourseFactory.create(
teams_configuration={
"max_team_size": 10,
"topics": [
{
"name": "Topic {}".format(topic_id),
"id": topic_id,
"description": "Description for topic {}".format(topic_id)
}
for topic_id in range(cls.NUM_TOPICS)
]
}
)
def setUp(self):
"""
Set up tests
"""
super(TestDashboard, self).setUp()
# will be assigned to self.client by default
self.user = UserFactory.create(password=self.test_password)
self.teams_url = reverse('teams_dashboard', args=[self.course.id])
def test_anonymous(self):
"""Verifies that an anonymous client cannot access the team
dashboard, and is redirected to the login page."""
anonymous_client = APIClient()
response = anonymous_client.get(self.teams_url)
redirect_url = '{0}?next={1}'.format(settings.LOGIN_URL, self.teams_url)
self.assertRedirects(response, redirect_url)
def test_not_enrolled_not_staff(self):
""" Verifies that a student who is not enrolled cannot access the team dashboard. """
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertEqual(404, response.status_code)
def test_not_enrolled_staff(self):
"""
Verifies that a user with global access who is not enrolled in the course can access the team dashboard.
"""
staff_user = UserFactory(is_staff=True, password=self.test_password)
staff_client = APIClient()
staff_client.login(username=staff_user.username, password=self.test_password)
response = staff_client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_not_staff(self):
"""
Verifies that a user without global access who is enrolled in the course can access the team dashboard.
"""
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(self.teams_url)
self.assertContains(response, "TeamsTabFactory", status_code=200)
def test_enrolled_teams_not_enabled(self):
"""
Verifies that a user without global access who is enrolled in the course cannot access the team dashboard
if the teams feature is not enabled.
"""
course = CourseFactory.create()
teams_url = reverse('teams_dashboard', args=[course.id])
CourseEnrollmentFactory.create(user=self.user, course_id=course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(teams_url)
self.assertEqual(404, response.status_code)
def test_query_counts(self):
# Enroll in the course and log in
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
        # Check the query count on the dashboard with no teams
with self.assertNumQueries(17):
self.client.get(self.teams_url)
# Create some teams
for topic_id in range(self.NUM_TOPICS):
team = CourseTeamFactory.create(
name=u"Team for topic {}".format(topic_id),
course_id=self.course.id,
topic_id=topic_id,
)
# Add the user to the last team
team.add_user(self.user)
# Check the query count on the dashboard again
with self.assertNumQueries(23):
self.client.get(self.teams_url)
def test_bad_course_id(self):
"""
Verifies expected behavior when course_id does not reference an existing course or is invalid.
"""
bad_org = "badorgxxx"
bad_team_url = self.teams_url.replace(self.course.id.org, bad_org)
CourseEnrollmentFactory.create(user=self.user, course_id=self.course.id)
self.client.login(username=self.user.username, password=self.test_password)
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
bad_team_url = bad_team_url.replace(bad_org, "invalid/course/id")
response = self.client.get(bad_team_url)
self.assertEqual(404, response.status_code)
class TeamAPITestCase(APITestCase, SharedModuleStoreTestCase):
"""Base class for Team API test cases."""
test_password = 'password'
@classmethod
def setUpClass(cls):
super(TeamAPITestCase, cls).setUpClass()
teams_configuration_1 = {
'topics':
[
{
'id': 'topic_{}'.format(i),
'name': name,
'description': 'Description for topic {}.'.format(i)
} for i, name in enumerate([u'Sólar power', 'Wind Power', 'Nuclear Power', 'Coal Power'])
]
}
cls.test_course_1 = CourseFactory.create(
org='TestX',
course='TS101',
display_name='Test Course',
teams_configuration=teams_configuration_1
)
teams_configuration_2 = {
'topics':
[
{
'id': 'topic_5',
'name': 'Other Interests',
'description': 'Description for topic 5.'
},
{
'id': 'topic_6',
'name': 'Public Profiles',
'description': 'Description for topic 6.'
},
],
'max_team_size': 1
}
cls.test_course_2 = CourseFactory.create(
org='MIT',
course='6.002x',
display_name='Circuits',
teams_configuration=teams_configuration_2
)
def setUp(self):
super(TeamAPITestCase, self).setUp()
self.topics_count = 4
self.users = {
'staff': AdminFactory.create(password=self.test_password),
'course_staff': StaffFactory.create(course_key=self.test_course_1.id, password=self.test_password)
}
self.create_and_enroll_student(username='student_enrolled')
self.create_and_enroll_student(username='student_enrolled_not_on_team')
self.create_and_enroll_student(username='student_unenrolled', courses=[])
# Make this student a community TA.
self.create_and_enroll_student(username='community_ta')
seed_permissions_roles(self.test_course_1.id)
community_ta_role = Role.objects.get(name=FORUM_ROLE_COMMUNITY_TA, course_id=self.test_course_1.id)
community_ta_role.users.add(self.users['community_ta'])
# This student is enrolled in both test courses and is a member of a team in each course, but is not on the
# same team as student_enrolled.
self.create_and_enroll_student(
courses=[self.test_course_1, self.test_course_2],
username='student_enrolled_both_courses_other_team'
)
# Make this student have a public profile
self.create_and_enroll_student(
courses=[self.test_course_2],
username='student_enrolled_public_profile'
)
profile = self.users['student_enrolled_public_profile'].profile
profile.year_of_birth = 1970
profile.save()
# This student is enrolled in the other course, but not yet a member of a team. This is to allow
# course_2 to use a max_team_size of 1 without breaking other tests on course_1
self.create_and_enroll_student(
courses=[self.test_course_2],
username='student_enrolled_other_course_not_on_team'
)
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
self.solar_team = CourseTeamFactory.create(
name=u'Sólar team',
course_id=self.test_course_1.id,
topic_id='topic_0'
)
self.wind_team = CourseTeamFactory.create(name='Wind Team', course_id=self.test_course_1.id)
self.nuclear_team = CourseTeamFactory.create(name='Nuclear Team', course_id=self.test_course_1.id)
self.another_team = CourseTeamFactory.create(name='Another Team', course_id=self.test_course_2.id)
self.public_profile_team = CourseTeamFactory.create(
name='Public Profile Team',
course_id=self.test_course_2.id,
topic_id='topic_6'
)
self.search_team = CourseTeamFactory.create(
name='Search',
description='queryable text',
country='GS',
language='to',
course_id=self.test_course_2.id,
topic_id='topic_7'
)
self.chinese_team = CourseTeamFactory.create(
name=u'著文企臺個',
description=u'共樣地面較,件展冷不護者這與民教過住意,國制銀產物助音是勢一友',
country='CN',
language='zh_HANS',
course_id=self.test_course_2.id,
topic_id='topic_7'
)
self.test_team_name_id_map = {team.name: team for team in (
self.solar_team,
self.wind_team,
self.nuclear_team,
self.another_team,
self.public_profile_team,
self.search_team,
self.chinese_team,
)}
for user, course in [('staff', self.test_course_1), ('course_staff', self.test_course_1)]:
CourseEnrollment.enroll(
self.users[user], course.id, check_access=True
)
# Django Rest Framework v3 requires us to pass a request to serializers
# that have URL fields. Since we're invoking this code outside the context
# of a request, we need to simulate that there's a request.
self.solar_team.add_user(self.users['student_enrolled'])
self.nuclear_team.add_user(self.users['student_enrolled_both_courses_other_team'])
self.another_team.add_user(self.users['student_enrolled_both_courses_other_team'])
self.public_profile_team.add_user(self.users['student_enrolled_public_profile'])
def build_membership_data_raw(self, username, team):
"""Assembles a membership creation payload based on the raw values provided."""
return {'username': username, 'team_id': team}
def build_membership_data(self, username, team):
"""Assembles a membership creation payload based on the username and team model provided."""
return self.build_membership_data_raw(self.users[username].username, team.team_id)
def create_and_enroll_student(self, courses=None, username=None):
""" Creates a new student and enrolls that student in the course.
Adds the new user to the self.users dictionary with the username as the key.
Returns the username once the user has been created.
"""
if username is not None:
user = UserFactory.create(password=self.test_password, username=username)
else:
user = UserFactory.create(password=self.test_password)
courses = courses if courses is not None else [self.test_course_1]
for course in courses:
CourseEnrollment.enroll(user, course.id, check_access=True)
self.users[user.username] = user
return user.username
def login(self, user):
"""Given a user string, logs the given user in.
Used for testing with ddt, which does not have access to self in
decorators. If user is 'student_inactive', then an inactive user will
be both created and logged in.
"""
if user == 'student_inactive':
student_inactive = UserFactory.create(password=self.test_password)
self.client.login(username=student_inactive.username, password=self.test_password)
student_inactive.is_active = False
student_inactive.save()
else:
self.client.login(username=self.users[user].username, password=self.test_password)
def make_call(self, url, expected_status=200, method='get', data=None, content_type=None, **kwargs):
"""Makes a call to the Team API at the given url with method and data.
If a user is specified in kwargs, that user is first logged in.
"""
user = kwargs.pop('user', 'student_enrolled_not_on_team')
if user:
self.login(user)
func = getattr(self.client, method)
if content_type:
response = func(url, data=data, content_type=content_type)
else:
response = func(url, data=data)
self.assertEqual(
expected_status,
response.status_code,
msg="Expected status {expected} but got {actual}: {content}".format(
expected=expected_status,
actual=response.status_code,
content=response.content,
)
)
if expected_status == 200:
return json.loads(response.content)
else:
return response
def get_teams_list(self, expected_status=200, data=None, no_course_id=False, **kwargs):
"""Gets the list of teams as the given user with data as query params. Verifies expected_status."""
data = data if data else {}
if 'course_id' not in data and not no_course_id:
data.update({'course_id': self.test_course_1.id})
return self.make_call(reverse('teams_list'), expected_status, 'get', data, **kwargs)
def build_team_data(self, name="Test team", course=None, description="Filler description", **kwargs):
"""Creates the payload for creating a team. kwargs can be used to specify additional fields."""
data = kwargs
course = course if course else self.test_course_1
data.update({
'name': name,
'course_id': str(course.id),
'description': description,
})
return data
def post_create_team(self, expected_status=200, data=None, **kwargs):
"""Posts data to the team creation endpoint. Verifies expected_status."""
return self.make_call(reverse('teams_list'), expected_status, 'post', data, **kwargs)
def get_team_detail(self, team_id, expected_status=200, data=None, **kwargs):
"""Gets detailed team information for team_id. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'get', data, **kwargs)
def delete_team(self, team_id, expected_status, **kwargs):
"""Delete the given team. Verifies expected_status."""
return self.make_call(reverse('teams_detail', args=[team_id]), expected_status, 'delete', **kwargs)
def patch_team_detail(self, team_id, expected_status, data=None, **kwargs):
"""Patches the team with team_id using data. Verifies expected_status."""
return self.make_call(
reverse('teams_detail', args=[team_id]),
expected_status,
'patch',
json.dumps(data) if data else None,
'application/merge-patch+json',
**kwargs
)
def get_topics_list(self, expected_status=200, data=None, **kwargs):
"""Gets the list of topics, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('topics_list'), expected_status, 'get', data, **kwargs)
def get_topic_detail(self, topic_id, course_id, expected_status=200, data=None, **kwargs):
"""Gets a single topic, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('topics_detail', kwargs={'topic_id': topic_id, 'course_id': str(course_id)}),
expected_status,
'get',
data,
**kwargs
)
def get_membership_list(self, expected_status=200, data=None, **kwargs):
"""Gets the membership list, passing data as query params. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'get', data, **kwargs)
def post_create_membership(self, expected_status=200, data=None, **kwargs):
"""Posts data to the membership creation endpoint. Verifies expected_status."""
return self.make_call(reverse('team_membership_list'), expected_status, 'post', data, **kwargs)
def get_membership_detail(self, team_id, username, expected_status=200, data=None, **kwargs):
"""Gets an individual membership record, passing data as query params. Verifies expected_status."""
return self.make_call(
reverse('team_membership_detail', args=[team_id, username]),
expected_status,
'get',
data,
**kwargs
)
def delete_membership(self, team_id, username, expected_status=200, **kwargs):
"""Deletes an individual membership record. Verifies expected_status."""
url = reverse('team_membership_detail', args=[team_id, username]) + '?admin=true'
return self.make_call(url, expected_status, 'delete', **kwargs)
def verify_expanded_public_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'bio', 'country', 'profile_image', 'time_zone', 'language_proficiencies']:
self.assertIn(field, user)
def verify_expanded_private_user(self, user):
"""Verifies that fields exist on the returned user json indicating that it is expanded."""
for field in ['username', 'url', 'profile_image']:
self.assertIn(field, user)
for field in ['bio', 'country', 'time_zone', 'language_proficiencies']:
self.assertNotIn(field, user)
def verify_expanded_team(self, team):
"""Verifies that fields exist on the returned team json indicating that it is expanded."""
for field in ['id', 'name', 'course_id', 'topic_id', 'date_created', 'description']:
self.assertIn(field, team)
@ddt.ddt
class TestListTeamsAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team listing API endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestListTeamsAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
teams = self.get_teams_list(user=user, expected_status=status)
if status == 200:
self.assertEqual(3, teams['count'])
def test_missing_course_id(self):
self.get_teams_list(400, no_course_id=True)
def verify_names(self, data, status, names=None, **kwargs):
"""Gets a team listing with data as query params, verifies status, and then verifies team names if specified."""
teams = self.get_teams_list(data=data, expected_status=status, **kwargs)
if names is not None and 200 <= status < 300:
results = teams['results']
self.assertEqual(names, [team['name'] for team in results])
def test_filter_invalid_course_id(self):
self.verify_names({'course_id': 'no_such_course'}, 400)
def test_filter_course_id(self):
self.verify_names(
{'course_id': self.test_course_2.id},
200,
['Another Team', 'Public Profile Team', 'Search', u'著文企臺個'],
user='staff'
)
def test_filter_topic_id(self):
self.verify_names({'course_id': self.test_course_1.id, 'topic_id': 'topic_0'}, 200, [u'Sólar team'])
def test_filter_username(self):
self.verify_names({'course_id': self.test_course_1.id, 'username': 'student_enrolled'}, 200, [u'Sólar team'])
self.verify_names({'course_id': self.test_course_1.id, 'username': 'staff'}, 200, [])
@ddt.data(
(None, 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
('name', 200, ['Nuclear Team', u'Sólar team', 'Wind Team']),
# Note that "Nuclear Team" and "Solar team" have the same open_slots.
# "Solar team" comes first due to secondary sort by last_activity_at.
('open_slots', 200, ['Wind Team', u'Sólar team', 'Nuclear Team']),
# Note that "Wind Team" and "Nuclear Team" have the same last_activity_at.
# "Wind Team" comes first due to secondary sort by open_slots.
('last_activity_at', 200, [u'Sólar team', 'Wind Team', 'Nuclear Team']),
)
@ddt.unpack
def test_order_by(self, field, status, names):
# Make "Solar team" the most recently active team.
# The CourseTeamFactory sets the last_activity_at to a fixed time (in the past), so all of the
# other teams have the same last_activity_at.
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
solar_team = self.test_team_name_id_map[u'Sólar team']
solar_team.last_activity_at = datetime.utcnow().replace(tzinfo=pytz.utc)
solar_team.save()
data = {'order_by': field} if field else {}
self.verify_names(data, status, names)
def test_order_by_with_text_search(self):
data = {'order_by': 'name', 'text_search': 'search'}
self.verify_names(data, 400, [])
self.assert_no_events_were_emitted()
@ddt.data((404, {'course_id': 'no/such/course'}), (400, {'topic_id': 'no_such_topic'}))
@ddt.unpack
def test_no_results(self, status, data):
self.get_teams_list(status, data)
def test_page_size(self):
result = self.get_teams_list(200, {'page_size': 2})
self.assertEquals(2, result['num_pages'])
def test_page(self):
result = self.get_teams_list(200, {'page_size': 1, 'page': 3})
self.assertEquals(3, result['num_pages'])
self.assertIsNone(result['next'])
self.assertIsNotNone(result['previous'])
def test_expand_private_user(self):
        # Use the default user, which is already private because no year_of_birth is set
result = self.get_teams_list(200, {'expand': 'user', 'topic_id': 'topic_0'})
self.verify_expanded_private_user(result['results'][0]['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_teams_list(
200,
{
'expand': 'user',
'topic_id': 'topic_6',
'course_id': self.test_course_2.id
},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['membership'][0]['user'])
@ddt.data(
('search', ['Search']),
('queryable', ['Search']),
('Tonga', ['Search']),
('Island', ['Search']),
('not-a-query', []),
('team', ['Another Team', 'Public Profile Team']),
(u'著文企臺個', [u'著文企臺個']),
)
@ddt.unpack
def test_text_search(self, text_search, expected_team_names):
def reset_search_index():
"""Clear out the search index and reindex the teams."""
CourseTeamIndexer.engine().destroy()
for team in self.test_team_name_id_map.values():
CourseTeamIndexer.index(team)
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
self.assert_event_emitted(
'edx.team.searched',
search_text=text_search,
topic_id=None,
number_of_results=len(expected_team_names)
)
# Verify that the searches still work for a user from a different locale
with translation.override('ar'):
reset_search_index()
self.verify_names(
{'course_id': self.test_course_2.id, 'text_search': text_search},
200,
expected_team_names,
user='student_enrolled_public_profile'
)
def test_delete_removed_from_search(self):
team = CourseTeamFactory.create(
name=u'zoinks',
course_id=self.test_course_1.id,
topic_id='topic_0'
)
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[team.name],
user='staff'
)
team.delete()
self.verify_names(
{'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
200,
[],
user='staff'
)
@ddt.ddt
class TestCreateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled_not_on_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.post_create_team(status, self.build_team_data(name="New Team"), user=user)
if status == 200:
self.verify_expected_team_id(team, 'new-team')
teams = self.get_teams_list(user=user)
self.assertIn("New Team", [team['name'] for team in teams['results']])
def _expected_team_id(self, team, expected_prefix):
""" Return the team id that we'd expect given this team data and this prefix. """
return expected_prefix + '-' + team['discussion_topic_id']
def verify_expected_team_id(self, team, expected_prefix):
""" Verifies that the team id starts with the specified prefix and ends with the discussion_topic_id """
self.assertIn('id', team)
self.assertIn('discussion_topic_id', team)
self.assertEqual(team['id'], self._expected_team_id(team, expected_prefix))
def test_naming(self):
new_teams = [
self.post_create_team(data=self.build_team_data(name=name), user=self.create_and_enroll_student())
for name in ["The Best Team", "The Best Team", "A really long team name"]
]
# Check that teams with the same name have unique IDs.
self.verify_expected_team_id(new_teams[0], 'the-best-team')
self.verify_expected_team_id(new_teams[1], 'the-best-team')
self.assertNotEqual(new_teams[0]['id'], new_teams[1]['id'])
# Verify expected truncation behavior with names > 20 characters.
self.verify_expected_team_id(new_teams[2], 'a-really-long-team-n')
@ddt.data((400, {
'name': 'Bad Course ID',
'course_id': 'no_such_course',
'description': "Filler Description"
}), (404, {
'name': "Non-existent course ID",
'course_id': 'no/such/course',
'description': "Filler Description"
}))
@ddt.unpack
def test_bad_course_data(self, status, data):
self.post_create_team(status, data)
def test_student_in_team(self):
response = self.post_create_team(
400,
data=self.build_team_data(
name="Doomed team",
course=self.test_course_1,
description="Overly ambitious student"
),
user='student_enrolled'
)
self.assertEqual(
"You are already in a team in this course.",
json.loads(response.content)["user_message"]
)
@ddt.data('staff', 'course_staff', 'community_ta')
def test_privileged_create_multiple_teams(self, user):
""" Privileged users can create multiple teams, even if they are already in one. """
# First add the privileged user to a team.
self.post_create_membership(
200,
self.build_membership_data(user, self.solar_team),
user=user
)
self.post_create_team(
data=self.build_team_data(
name="Another team",
course=self.test_course_1,
description="Privileged users are the best"
),
user=user
)
@ddt.data({'description': ''}, {'name': 'x' * 1000}, {'name': ''})
def test_bad_fields(self, kwargs):
self.post_create_team(400, self.build_team_data(**kwargs))
def test_missing_name(self):
self.post_create_team(400, {
'course_id': str(self.test_course_1.id),
'description': "foobar"
})
def test_full_student_creator(self):
creator = self.create_and_enroll_student()
team = self.post_create_team(data=self.build_team_data(
name="Fully specified team",
course=self.test_course_1,
description="Another fantastic team",
topic_id='great-topic',
country='CA',
language='fr'
), user=creator)
# Verify the id (it ends with a unique hash, which is the same as the discussion_id).
self.verify_expected_team_id(team, 'fully-specified-team')
del team['id']
self.assert_event_emitted(
'edx.team.created',
team_id=self._expected_team_id(team, 'fully-specified-team'),
)
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self._expected_team_id(team, 'fully-specified-team'),
user_id=self.users[creator].id,
add_method='added_on_create'
)
# Remove date_created and discussion_topic_id because they change between test runs
del team['date_created']
del team['discussion_topic_id']
# Since membership is its own list, we want to examine this separately.
team_membership = team['membership']
del team['membership']
        # Verify that last_activity_at has been set to a time today.
self.assertEqual(
parser.parse(team['last_activity_at']).date(),
datetime.utcnow().replace(tzinfo=pytz.utc).date()
)
del team['last_activity_at']
# Verify that the creating user gets added to the team.
self.assertEqual(len(team_membership), 1)
member = team_membership[0]['user']
self.assertEqual(member['username'], creator)
self.assertEqual(team, {
'name': 'Fully specified team',
'language': 'fr',
'country': 'CA',
'topic_id': 'great-topic',
'course_id': str(self.test_course_1.id),
'description': 'Another fantastic team'
})
@ddt.data('staff', 'course_staff', 'community_ta')
def test_membership_staff_creator(self, user):
# Verify that staff do not automatically get added to a team
# when they create one.
team = self.post_create_team(data=self.build_team_data(
name="New team",
course=self.test_course_1,
description="Another fantastic team",
), user=user)
self.assertEqual(team['membership'], [])
@ddt.ddt
class TestDetailTeamAPI(TeamAPITestCase):
"""Test cases for the team detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
team = self.get_team_detail(self.solar_team.team_id, status, user=user)
if status == 200:
self.assertEqual(team['description'], self.solar_team.description)
self.assertEqual(team['discussion_topic_id'], self.solar_team.discussion_topic_id)
self.assertEqual(parser.parse(team['last_activity_at']), LAST_ACTIVITY_AT)
def test_does_not_exist(self):
self.get_team_detail('no_such_team', 404)
def test_expand_private_user(self):
        # Use the default user, whose profile is already private because no year_of_birth is set
result = self.get_team_detail(self.solar_team.team_id, 200, {'expand': 'user'})
self.verify_expanded_private_user(result['membership'][0]['user'])
def test_expand_public_user(self):
result = self.get_team_detail(
self.public_profile_team.team_id,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['membership'][0]['user'])
@ddt.ddt
class TestDeleteTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team delete endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 204),
('course_staff', 204),
('community_ta', 204)
)
@ddt.unpack
def test_access(self, user, status):
self.delete_team(self.solar_team.team_id, status, user=user)
if status == 204:
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
def test_does_not_exist(self):
self.delete_team('nonexistent', 404)
def test_memberships_deleted(self):
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 1)
self.delete_team(self.solar_team.team_id, 204, user='staff')
self.assert_event_emitted(
'edx.team.deleted',
team_id=self.solar_team.team_id,
)
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
remove_method='team_deleted',
user_id=self.users['student_enrolled'].id
)
self.assertEqual(CourseTeamMembership.objects.filter(team=self.solar_team).count(), 0)
@ddt.ddt
class TestUpdateTeamAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the team update endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestUpdateTeamAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 403),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
prev_name = self.solar_team.name
team = self.patch_team_detail(self.solar_team.team_id, status, {'name': 'foo'}, user=user)
if status == 200:
            self.assertEqual(team['name'], 'foo')
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field='name',
old=prev_name,
new='foo'
)
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 404),
('staff', 404),
('course_staff', 404),
('community_ta', 404),
)
@ddt.unpack
def test_access_bad_id(self, user, status):
self.patch_team_detail("no_such_team", status, {'name': 'foo'}, user=user)
@ddt.data(
('id', 'foobar'),
('description', ''),
('country', 'no_such_country'),
('language', 'no_such_language')
)
@ddt.unpack
def test_bad_requests(self, key, value):
self.patch_team_detail(self.solar_team.team_id, 400, {key: value}, user='staff')
@ddt.data(('country', 'US'), ('language', 'en'), ('foo', 'bar'))
@ddt.unpack
def test_good_requests(self, key, value):
if hasattr(self.solar_team, key):
prev_value = getattr(self.solar_team, key)
self.patch_team_detail(self.solar_team.team_id, 200, {key: value}, user='staff')
if hasattr(self.solar_team, key):
self.assert_event_emitted(
'edx.team.changed',
team_id=self.solar_team.team_id,
truncated=[],
field=key,
old=prev_value,
new=value
)
def test_does_not_exist(self):
self.patch_team_detail('no_such_team', 404, user='staff')
@ddt.ddt
class TestListTopicsAPI(TeamAPITestCase):
"""Test cases for the topic listing endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topics = self.get_topics_list(status, {'course_id': self.test_course_1.id}, user=user)
if status == 200:
self.assertEqual(topics['count'], self.topics_count)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_key(self, course_id):
self.get_topics_list(404, {'course_id': course_id})
def test_without_course_id(self):
self.get_topics_list(400)
@ddt.data(
(None, 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
('name', 200, ['Coal Power', 'Nuclear Power', u'Sólar power', 'Wind Power'], 'name'),
# Note that "Nuclear Power" and "Solar power" both have 2 teams. "Coal Power" and "Window Power"
# both have 0 teams. The secondary sort is alphabetical by name.
('team_count', 200, ['Nuclear Power', u'Sólar power', 'Coal Power', 'Wind Power'], 'team_count'),
('no_such_field', 400, [], None),
)
@ddt.unpack
def test_order_by(self, field, status, names, expected_ordering):
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Nuclear Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Nuclear Team 1', course_id=self.test_course_1.id, topic_id='topic_2'
)
CourseTeamFactory.create(
name=u'Nuclear Team 2', course_id=self.test_course_1.id, topic_id='topic_2'
)
data = {'course_id': self.test_course_1.id}
if field:
data['order_by'] = field
topics = self.get_topics_list(status, data)
if status == 200:
self.assertEqual(names, [topic['name'] for topic in topics['results']])
self.assertEqual(topics['sort_order'], expected_ordering)
def test_order_by_team_count_secondary(self):
"""
Ensure that the secondary sort (alphabetical) when primary sort is team_count
works across pagination boundaries.
"""
with skip_signal(
post_save,
receiver=course_team_post_save_callback,
sender=CourseTeam,
dispatch_uid='teams.signals.course_team_post_save_callback'
):
# Add 2 teams to "Wind Power", which previously had no teams.
CourseTeamFactory.create(
name=u'Wind Team 1', course_id=self.test_course_1.id, topic_id='topic_1'
)
CourseTeamFactory.create(
name=u'Wind Team 2', course_id=self.test_course_1.id, topic_id='topic_1'
)
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 1,
'order_by': 'team_count'
})
self.assertEqual(["Wind Power", u'Sólar power'], [topic['name'] for topic in topics['results']])
topics = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
'page': 2,
'order_by': 'team_count'
})
self.assertEqual(["Coal Power", "Nuclear Power"], [topic['name'] for topic in topics['results']])
def test_pagination(self):
response = self.get_topics_list(data={
'course_id': self.test_course_1.id,
'page_size': 2,
})
self.assertEqual(2, len(response['results']))
self.assertIn('next', response)
self.assertIn('previous', response)
self.assertIsNone(response['previous'])
self.assertIsNotNone(response['next'])
def test_default_ordering(self):
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
self.assertEqual(response['sort_order'], 'name')
def test_team_count(self):
"""Test that team_count is included for each topic"""
response = self.get_topics_list(data={'course_id': self.test_course_1.id})
for topic in response['results']:
self.assertIn('team_count', topic)
if topic['id'] == u'topic_0':
self.assertEqual(topic['team_count'], 1)
else:
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestDetailTopicAPI(TeamAPITestCase):
"""Test cases for the topic detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 403),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
topic = self.get_topic_detail('topic_0', self.test_course_1.id, status, user=user)
if status == 200:
for field in ('id', 'name', 'description'):
self.assertIn(field, topic)
@ddt.data('A+BOGUS+COURSE', 'A/BOGUS/COURSE')
def test_invalid_course_id(self, course_id):
self.get_topic_detail('topic_0', course_id, 404)
def test_invalid_topic_id(self):
self.get_topic_detail('no_such_topic', self.test_course_1.id, 404)
def test_team_count(self):
"""Test that team_count is included with a topic"""
topic = self.get_topic_detail(topic_id='topic_0', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 1)
topic = self.get_topic_detail(topic_id='topic_1', course_id=self.test_course_1.id)
self.assertEqual(topic['team_count'], 0)
@ddt.ddt
class TestListMembershipAPI(TeamAPITestCase):
"""Test cases for the membership list endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled', 200),
('student_enrolled_both_courses_other_team', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id}, user=user)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['user']['username'], self.users['student_enrolled'].username)
@ddt.data(
(None, 401, False),
('student_inactive', 401, False),
('student_unenrolled', 200, False),
('student_enrolled', 200, True),
('student_enrolled_both_courses_other_team', 200, True),
('staff', 200, True),
('course_staff', 200, True),
('community_ta', 200, True),
)
@ddt.unpack
def test_access_by_username(self, user, status, has_content):
membership = self.get_membership_list(status, {'username': self.users['student_enrolled'].username}, user=user)
if status == 200:
if has_content:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
else:
self.assertEqual(membership['count'], 0)
@ddt.data(
('student_enrolled_both_courses_other_team', 'TestX/TS101/Test_Course', 200, 'Nuclear Team'),
('student_enrolled_both_courses_other_team', 'MIT/6.002x/Circuits', 200, 'Another Team'),
('student_enrolled', 'TestX/TS101/Test_Course', 200, u'Sólar team'),
('student_enrolled', 'MIT/6.002x/Circuits', 400, ''),
)
@ddt.unpack
def test_course_filter_with_username(self, user, course_id, status, team_name):
membership = self.get_membership_list(
status,
{
'username': self.users[user],
'course_id': course_id
},
user=user
)
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.test_team_name_id_map[team_name].team_id)
@ddt.data(
('TestX/TS101/Test_Course', 200),
('MIT/6.002x/Circuits', 400),
)
@ddt.unpack
def test_course_filter_with_team_id(self, course_id, status):
membership = self.get_membership_list(status, {'team_id': self.solar_team.team_id, 'course_id': course_id})
if status == 200:
self.assertEqual(membership['count'], 1)
self.assertEqual(membership['results'][0]['team']['team_id'], self.solar_team.team_id)
def test_bad_course_id(self):
self.get_membership_list(404, {'course_id': 'no_such_course'})
def test_no_username_or_team_id(self):
self.get_membership_list(400, {})
def test_bad_team_id(self):
self.get_membership_list(404, {'team_id': 'no_such_team'})
def test_expand_private_user(self):
        # Use the default user, whose profile is already private because no year_of_birth is set
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'user'})
self.verify_expanded_private_user(result['results'][0]['user'])
def test_expand_public_user(self):
result = self.get_membership_list(
200,
{'team_id': self.public_profile_team.team_id, 'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['results'][0]['user'])
def test_expand_team(self):
result = self.get_membership_list(200, {'team_id': self.solar_team.team_id, 'expand': 'team'})
self.verify_expanded_team(result['results'][0]['team'])
@ddt.ddt
class TestCreateMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership creation endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestCreateMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 404),
('student_enrolled_both_courses_other_team', 404),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
membership = self.post_create_membership(
status,
self.build_membership_data('student_enrolled_not_on_team', self.solar_team),
user=user
)
if status == 200:
self.assertEqual(membership['user']['username'], self.users['student_enrolled_not_on_team'].username)
self.assertEqual(membership['team']['team_id'], self.solar_team.team_id)
memberships = self.get_membership_list(200, {'team_id': self.solar_team.team_id})
self.assertEqual(memberships['count'], 2)
add_method = 'joined_from_team_view' if user == 'student_enrolled_not_on_team' else 'added_by_another_user'
self.assert_event_emitted(
'edx.team.learner_added',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled_not_on_team'].id,
add_method=add_method
)
else:
self.assert_no_events_were_emitted()
def test_no_username(self):
response = self.post_create_membership(400, {'team_id': self.solar_team.team_id})
self.assertIn('username', json.loads(response.content)['field_errors'])
def test_no_team(self):
response = self.post_create_membership(400, {'username': self.users['student_enrolled_not_on_team'].username})
self.assertIn('team_id', json.loads(response.content)['field_errors'])
def test_bad_team(self):
self.post_create_membership(
404,
self.build_membership_data_raw(self.users['student_enrolled'].username, 'no_such_team')
)
def test_bad_username(self):
self.post_create_membership(
404,
self.build_membership_data_raw('no_such_user', self.solar_team.team_id),
user='staff'
)
@ddt.data('student_enrolled', 'staff', 'course_staff')
def test_join_twice(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled', self.solar_team),
user=user
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
def test_join_second_team_in_course(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_both_courses_other_team', self.solar_team),
user='student_enrolled_both_courses_other_team'
)
self.assertIn('already a member', json.loads(response.content)['developer_message'])
@ddt.data('staff', 'course_staff')
def test_not_enrolled_in_team_course(self, user):
response = self.post_create_membership(
400,
self.build_membership_data('student_unenrolled', self.solar_team),
user=user
)
self.assertIn('not enrolled', json.loads(response.content)['developer_message'])
def test_over_max_team_size_in_course_2(self):
response = self.post_create_membership(
400,
self.build_membership_data('student_enrolled_other_course_not_on_team', self.another_team),
user='student_enrolled_other_course_not_on_team'
)
self.assertIn('full', json.loads(response.content)['developer_message'])
@ddt.ddt
class TestDetailMembershipAPI(TeamAPITestCase):
"""Test cases for the membership detail endpoint."""
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 200),
('student_enrolled', 200),
('staff', 200),
('course_staff', 200),
('community_ta', 200),
)
@ddt.unpack
def test_access(self, user, status):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
def test_bad_team(self):
self.get_membership_detail('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.get_membership_detail(self.solar_team.team_id, 'no_such_user', 404)
def test_no_membership(self):
self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled_not_on_team'].username,
404
)
def test_expand_private_user(self):
        # Use the default user, whose profile is already private because no year_of_birth is set
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'user'}
)
self.verify_expanded_private_user(result['user'])
def test_expand_public_user(self):
result = self.get_membership_detail(
self.public_profile_team.team_id,
self.users['student_enrolled_public_profile'].username,
200,
{'expand': 'user'},
user='student_enrolled_public_profile'
)
self.verify_expanded_public_user(result['user'])
def test_expand_team(self):
result = self.get_membership_detail(
self.solar_team.team_id,
self.users['student_enrolled'].username,
200,
{'expand': 'team'}
)
self.verify_expanded_team(result['team'])
@ddt.ddt
class TestDeleteMembershipAPI(EventTestMixin, TeamAPITestCase):
"""Test cases for the membership deletion endpoint."""
def setUp(self): # pylint: disable=arguments-differ
super(TestDeleteMembershipAPI, self).setUp('lms.djangoapps.teams.utils.tracker')
@ddt.data(
(None, 401),
('student_inactive', 401),
('student_unenrolled', 404),
('student_enrolled_not_on_team', 404),
('student_enrolled', 204),
('staff', 204),
('course_staff', 204),
('community_ta', 204),
)
@ddt.unpack
def test_access(self, user, status):
self.delete_membership(
self.solar_team.team_id,
self.users['student_enrolled'].username,
status,
user=user
)
if status == 204:
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='removed_by_admin'
)
else:
self.assert_no_events_were_emitted()
def test_leave_team(self):
"""
The key difference between this test and test_access above is that
removal via "Edit Membership" and "Leave Team" emit different events
despite hitting the same API endpoint, due to the 'admin' query string.
"""
url = reverse('team_membership_detail', args=[self.solar_team.team_id, self.users['student_enrolled'].username])
self.make_call(url, 204, 'delete', user='student_enrolled')
self.assert_event_emitted(
'edx.team.learner_removed',
team_id=self.solar_team.team_id,
user_id=self.users['student_enrolled'].id,
remove_method='self_removal'
)
def test_bad_team(self):
self.delete_membership('no_such_team', self.users['student_enrolled'].username, 404)
def test_bad_username(self):
self.delete_membership(self.solar_team.team_id, 'no_such_user', 404)
def test_missing_membership(self):
self.delete_membership(self.wind_team.team_id, self.users['student_enrolled'].username, 404)
class TestElasticSearchErrors(TeamAPITestCase):
"""Test that the Team API is robust to Elasticsearch connection errors."""
ES_ERROR = ConnectionError('N/A', 'connection error', {})
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_list_teams(self, __):
"""Test that text searches return a 503 when Elasticsearch is down.
The endpoint should still return 200 when a search is not supplied."""
self.get_teams_list(
expected_status=503,
data={'course_id': self.test_course_1.id, 'text_search': 'zoinks'},
user='staff'
)
self.get_teams_list(
expected_status=200,
data={'course_id': self.test_course_1.id},
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_create_team(self, __):
"""Test that team creation is robust to Elasticsearch errors."""
self.post_create_team(
expected_status=200,
data=self.build_team_data(name='zoinks'),
user='staff'
)
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_delete_team(self, __):
"""Test that team deletion is robust to Elasticsearch errors."""
self.delete_team(self.wind_team.team_id, 204, user='staff')
@patch.object(SearchEngine, 'get_search_engine', side_effect=ES_ERROR)
def test_patch_team(self, __):
"""Test that team updates are robust to Elasticsearch errors."""
self.patch_team_detail(
self.wind_team.team_id,
200,
data={'description': 'new description'},
user='staff'
)
|
agpl-3.0
|
xyguo/scikit-learn
|
sklearn/ensemble/bagging.py
|
22
|
36582
|
"""Bagging meta-estimator."""
# Author: Gilles Louppe <[email protected]>
# License: BSD 3 clause
from __future__ import division
import itertools
import numbers
import numpy as np
from warnings import warn
from abc import ABCMeta, abstractmethod
from ..base import ClassifierMixin, RegressorMixin
from ..externals.joblib import Parallel, delayed
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..metrics import r2_score, accuracy_score
from ..tree import DecisionTreeClassifier, DecisionTreeRegressor
from ..utils import check_random_state, check_X_y, check_array, column_or_1d
from ..utils.random import sample_without_replacement
from ..utils.validation import has_fit_parameter, check_is_fitted
from ..utils.fixes import bincount
from ..utils.metaestimators import if_delegate_has_method
from ..utils.multiclass import check_classification_targets
from .base import BaseEnsemble, _partition_estimators
__all__ = ["BaggingClassifier",
"BaggingRegressor"]
MAX_INT = np.iinfo(np.int32).max
def _parallel_build_estimators(n_estimators, ensemble, X, y, sample_weight,
max_samples, seeds, total_n_estimators, verbose):
"""Private function used to build a batch of estimators within a job."""
# Retrieve settings
n_samples, n_features = X.shape
max_features = ensemble.max_features
if (not isinstance(max_samples, (numbers.Integral, np.integer)) and
(0.0 < max_samples <= 1.0)):
max_samples = int(max_samples * n_samples)
if (not isinstance(max_features, (numbers.Integral, np.integer)) and
(0.0 < max_features <= 1.0)):
max_features = int(max_features * n_features)
bootstrap = ensemble.bootstrap
bootstrap_features = ensemble.bootstrap_features
support_sample_weight = has_fit_parameter(ensemble.base_estimator_,
"sample_weight")
if not support_sample_weight and sample_weight is not None:
raise ValueError("The base estimator doesn't support sample weight")
# Build estimators
estimators = []
estimators_samples = []
estimators_features = []
for i in range(n_estimators):
if verbose > 1:
print("Building estimator %d of %d for this parallel run (total %d)..." %
(i + 1, n_estimators, total_n_estimators))
random_state = check_random_state(seeds[i])
seed = random_state.randint(MAX_INT)
estimator = ensemble._make_estimator(append=False)
        try:  # Not all estimators accept a random_state
estimator.set_params(random_state=seed)
except ValueError:
pass
# Draw features
if bootstrap_features:
features = random_state.randint(0, n_features, max_features)
else:
features = sample_without_replacement(n_features,
max_features,
random_state=random_state)
# Draw samples, using sample weights, and then fit
if support_sample_weight:
if sample_weight is None:
curr_sample_weight = np.ones((n_samples,))
else:
curr_sample_weight = sample_weight.copy()
if bootstrap:
indices = random_state.randint(0, n_samples, max_samples)
sample_counts = bincount(indices, minlength=n_samples)
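                # Reweighting by the bootstrap draw counts is equivalent to
                # fitting on a sample drawn with replacement, without copying X.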
curr_sample_weight *= sample_counts
else:
not_indices = sample_without_replacement(
n_samples,
n_samples - max_samples,
random_state=random_state)
curr_sample_weight[not_indices] = 0
estimator.fit(X[:, features], y, sample_weight=curr_sample_weight)
samples = curr_sample_weight > 0.
# Draw samples, using a mask, and then fit
else:
if bootstrap:
indices = random_state.randint(0, n_samples, max_samples)
else:
indices = sample_without_replacement(n_samples,
max_samples,
random_state=random_state)
sample_counts = bincount(indices, minlength=n_samples)
estimator.fit((X[indices])[:, features], y[indices])
samples = sample_counts > 0.
estimators.append(estimator)
estimators_samples.append(samples)
estimators_features.append(features)
return estimators, estimators_samples, estimators_features
def _parallel_predict_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute (proba-)predictions within a job."""
n_samples = X.shape[0]
proba = np.zeros((n_samples, n_classes))
for estimator, features in zip(estimators, estimators_features):
if hasattr(estimator, "predict_proba"):
proba_estimator = estimator.predict_proba(X[:, features])
if n_classes == len(estimator.classes_):
proba += proba_estimator
else:
proba[:, estimator.classes_] += \
proba_estimator[:, range(len(estimator.classes_))]
else:
# Resort to voting
predictions = estimator.predict(X[:, features])
for i in range(n_samples):
proba[i, predictions[i]] += 1
return proba
def _parallel_predict_log_proba(estimators, estimators_features, X, n_classes):
"""Private function used to compute log probabilities within a job."""
n_samples = X.shape[0]
log_proba = np.empty((n_samples, n_classes))
log_proba.fill(-np.inf)
all_classes = np.arange(n_classes, dtype=np.int)
for estimator, features in zip(estimators, estimators_features):
log_proba_estimator = estimator.predict_log_proba(X[:, features])
if n_classes == len(estimator.classes_):
log_proba = np.logaddexp(log_proba, log_proba_estimator)
else:
log_proba[:, estimator.classes_] = np.logaddexp(
log_proba[:, estimator.classes_],
log_proba_estimator[:, range(len(estimator.classes_))])
missing = np.setdiff1d(all_classes, estimator.classes_)
log_proba[:, missing] = np.logaddexp(log_proba[:, missing],
-np.inf)
return log_proba
def _parallel_decision_function(estimators, estimators_features, X):
"""Private function used to compute decisions within a job."""
return sum(estimator.decision_function(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
def _parallel_predict_regression(estimators, estimators_features, X):
"""Private function used to compute predictions within a job."""
return sum(estimator.predict(X[:, features])
for estimator, features in zip(estimators,
estimators_features))
class BaseBagging(with_metaclass(ABCMeta, BaseEnsemble)):
"""Base class for Bagging meta-estimator.
Warning: This class should not be used directly. Use derived classes
instead.
"""
@abstractmethod
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaseBagging, self).__init__(
base_estimator=base_estimator,
n_estimators=n_estimators)
self.max_samples = max_samples
self.max_features = max_features
self.bootstrap = bootstrap
self.bootstrap_features = bootstrap_features
self.oob_score = oob_score
self.warm_start = warm_start
self.n_jobs = n_jobs
self.random_state = random_state
self.verbose = verbose
def fit(self, X, y, sample_weight=None):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
Returns
-------
self : object
Returns self.
"""
return self._fit(X, y, self.max_samples, sample_weight=sample_weight)
def _fit(self, X, y, max_samples, max_depth=None, sample_weight=None):
"""Build a Bagging ensemble of estimators from the training
set (X, y).
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
y : array-like, shape = [n_samples]
The target values (class labels in classification, real numbers in
regression).
max_samples : int or float, optional (default=None)
Argument to use instead of self.max_samples.
max_depth : int, optional (default=None)
Override value used when constructing base estimator. Only
supported if the base estimator has a max_depth parameter.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Note that this is supported only if the base estimator supports
sample weighting.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
# Convert data
X, y = check_X_y(X, y, ['csr', 'csc'])
# Remap output
n_samples, self.n_features_ = X.shape
y = self._validate_y(y)
# Check parameters
self._validate_estimator()
if max_depth is not None:
self.base_estimator_.max_depth = max_depth
# if max_samples is float:
if not isinstance(max_samples, (numbers.Integral, np.integer)):
max_samples = int(max_samples * X.shape[0])
if not (0 < max_samples <= X.shape[0]):
raise ValueError("max_samples must be in (0, n_samples]")
if isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
max_features = int(self.max_features * self.n_features_)
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not self.bootstrap and self.oob_score:
raise ValueError("Out of bag estimation only available"
" if bootstrap=True")
if self.warm_start and self.oob_score:
raise ValueError("Out of bag estimate only available"
" if warm_start=False")
if hasattr(self, "oob_score_") and self.warm_start:
del self.oob_score_
if not self.warm_start or len(self.estimators_) == 0:
# Free allocated memory, if any
self.estimators_ = []
self.estimators_samples_ = []
self.estimators_features_ = []
n_more_estimators = self.n_estimators - len(self.estimators_)
if n_more_estimators < 0:
raise ValueError('n_estimators=%d must be larger or equal to '
'len(estimators_)=%d when warm_start==True'
% (self.n_estimators, len(self.estimators_)))
elif n_more_estimators == 0:
warn("Warm-start fitting without increasing n_estimators does not "
"fit new trees.")
return self
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(n_more_estimators,
self.n_jobs)
total_n_estimators = sum(n_estimators)
# Advance random state to state after training
# the first n_estimators
if self.warm_start and len(self.estimators_) > 0:
random_state.randint(MAX_INT, size=len(self.estimators_))
seeds = random_state.randint(MAX_INT, size=n_more_estimators)
all_results = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_build_estimators)(
n_estimators[i],
self,
X,
y,
sample_weight,
max_samples,
seeds[starts[i]:starts[i + 1]],
total_n_estimators,
verbose=self.verbose)
for i in range(n_jobs))
# Reduce
self.estimators_ += list(itertools.chain.from_iterable(
t[0] for t in all_results))
self.estimators_samples_ += list(itertools.chain.from_iterable(
t[1] for t in all_results))
self.estimators_features_ += list(itertools.chain.from_iterable(
t[2] for t in all_results))
if self.oob_score:
self._set_oob_score(X, y)
return self
@abstractmethod
def _set_oob_score(self, X, y):
"""Calculate out of bag predictions and score."""
def _validate_y(self, y):
# Default implementation
return column_or_1d(y, warn=True)
class BaggingClassifier(BaseBagging, ClassifierMixin):
"""A Bagging classifier.
A Bagging classifier is an ensemble meta-estimator that fits base
classifiers each on random subsets of the original dataset and then
    aggregates their individual predictions (either by voting or by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
.. versionadded:: 0.17
*warm_start* constructor parameter.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
base_estimator_ : estimator
The base estimator from which the ensemble is grown.
estimators_ : list of estimators
The collection of fitted base estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
classes_ : array of shape = [n_classes]
The classes labels.
n_classes_ : int or list
The number of classes.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_decision_function_ : array of shape = [n_samples, n_classes]
Decision function computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_decision_function_` might contain NaN.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
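
    Examples
    --------
    A minimal usage sketch (illustrative only; any classifier exposing
    ``fit`` and ``predict`` can serve as the base estimator):

    >>> from sklearn.datasets import load_iris
    >>> from sklearn.neighbors import KNeighborsClassifier
    >>> from sklearn.ensemble import BaggingClassifier
    >>> iris = load_iris()
    >>> clf = BaggingClassifier(KNeighborsClassifier(),
    ...                         max_samples=0.5, max_features=0.5,
    ...                         random_state=0)
    >>> clf = clf.fit(iris.data, iris.target)
    >>> predictions = clf.predict(iris.data)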
"""
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaggingClassifier, self).__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(BaggingClassifier, self)._validate_estimator(
default=DecisionTreeClassifier())
def _set_oob_score(self, X, y):
n_classes_ = self.n_classes_
classes_ = self.classes_
n_samples = y.shape[0]
predictions = np.zeros((n_samples, n_classes_))
for estimator, samples, features in zip(self.estimators_,
self.estimators_samples_,
self.estimators_features_):
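            # ``samples`` marks the in-bag examples of this estimator, so the
            # inverted mask selects its out-of-bag examples.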
mask = np.ones(n_samples, dtype=np.bool)
mask[samples] = False
if hasattr(estimator, "predict_proba"):
predictions[mask, :] += estimator.predict_proba(
(X[mask, :])[:, features])
else:
p = estimator.predict((X[mask, :])[:, features])
j = 0
for i in range(n_samples):
if mask[i]:
predictions[i, p[j]] += 1
j += 1
if (predictions.sum(axis=1) == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates.")
oob_decision_function = (predictions /
predictions.sum(axis=1)[:, np.newaxis])
oob_score = accuracy_score(y, classes_.take(np.argmax(predictions,
axis=1)))
self.oob_decision_function_ = oob_decision_function
self.oob_score_ = oob_score
def _validate_y(self, y):
y = column_or_1d(y, warn=True)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
self.n_classes_ = len(self.classes_)
return y
def predict(self, X):
"""Predict class for X.
The predicted class of an input sample is computed as the class with
the highest mean predicted probability. If base estimators do not
implement a ``predict_proba`` method, then it resorts to voting.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : array of shape = [n_samples]
The predicted classes.
"""
        predicted_probability = self.predict_proba(X)
        return self.classes_.take(np.argmax(predicted_probability, axis=1),
                                  axis=0)
def predict_proba(self, X):
"""Predict class probabilities for X.
        The predicted class probabilities of an input sample are computed as
the mean predicted class probabilities of the base estimators in the
ensemble. If base estimators do not implement a ``predict_proba``
method, then it resorts to voting and the predicted class probabilities
        of an input sample represent the proportion of estimators predicting
each class.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, "classes_")
# Check data
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} and "
"input n_features is {1}."
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_proba)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X,
self.n_classes_)
for i in range(n_jobs))
# Reduce
proba = sum(all_proba) / self.n_estimators
return proba
def predict_log_proba(self, X):
"""Predict class log-probabilities for X.
        The predicted class log-probabilities of an input sample are computed as
the log of the mean predicted class probabilities of the base
estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
p : array of shape = [n_samples, n_classes]
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
check_is_fitted(self, "classes_")
if hasattr(self.base_estimator_, "predict_log_proba"):
# Check data
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {0} "
"and input n_features is {1} "
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(
self.n_estimators, self.n_jobs)
all_log_proba = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_log_proba)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X,
self.n_classes_)
for i in range(n_jobs))
# Reduce
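            # Averaging happens in log-space for numerical stability:
            # accumulating with logaddexp and subtracting log(n_estimators)
            # is equivalent to log(mean(exp(log_proba))).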
log_proba = all_log_proba[0]
for j in range(1, len(all_log_proba)):
log_proba = np.logaddexp(log_proba, all_log_proba[j])
log_proba -= np.log(self.n_estimators)
return log_proba
else:
return np.log(self.predict_proba(X))
@if_delegate_has_method(delegate='base_estimator')
def decision_function(self, X):
"""Average of the decision functions of the base classifiers.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
score : array, shape = [n_samples, k]
The decision function of the input samples. The columns correspond
to the classes in sorted order, as they appear in the attribute
``classes_``. Regression and binary classification are special
cases with ``k == 1``, otherwise ``k==n_classes``.
"""
check_is_fitted(self, "classes_")
# Check data
X = check_array(X, accept_sparse=['csr', 'csc'])
if self.n_features_ != X.shape[1]:
raise ValueError("Number of features of the model must "
"match the input. Model n_features is {1} and "
"input n_features is {2} "
"".format(self.n_features_, X.shape[1]))
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_decisions = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_decision_function)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X)
for i in range(n_jobs))
# Reduce
decisions = sum(all_decisions) / self.n_estimators
return decisions
class BaggingRegressor(BaseBagging, RegressorMixin):
"""A Bagging regressor.
A Bagging regressor is an ensemble meta-estimator that fits base
regressors each on random subsets of the original dataset and then
    aggregates their individual predictions (by averaging)
to form a final prediction. Such a meta-estimator can typically be used as
a way to reduce the variance of a black-box estimator (e.g., a decision
tree), by introducing randomization into its construction procedure and
then making an ensemble out of it.
This algorithm encompasses several works from the literature. When random
subsets of the dataset are drawn as random subsets of the samples, then
this algorithm is known as Pasting [1]_. If samples are drawn with
replacement, then the method is known as Bagging [2]_. When random subsets
of the dataset are drawn as random subsets of the features, then the method
is known as Random Subspaces [3]_. Finally, when base estimators are built
on subsets of both samples and features, then the method is known as
Random Patches [4]_.
Read more in the :ref:`User Guide <bagging>`.
Parameters
----------
base_estimator : object or None, optional (default=None)
The base estimator to fit on random subsets of the dataset.
If None, then the base estimator is a decision tree.
n_estimators : int, optional (default=10)
The number of base estimators in the ensemble.
max_samples : int or float, optional (default=1.0)
The number of samples to draw from X to train each base estimator.
- If int, then draw `max_samples` samples.
- If float, then draw `max_samples * X.shape[0]` samples.
max_features : int or float, optional (default=1.0)
The number of features to draw from X to train each base estimator.
- If int, then draw `max_features` features.
- If float, then draw `max_features * X.shape[1]` features.
bootstrap : boolean, optional (default=True)
Whether samples are drawn with replacement.
bootstrap_features : boolean, optional (default=False)
Whether features are drawn with replacement.
oob_score : bool
Whether to use out-of-bag samples to estimate
the generalization error.
warm_start : bool, optional (default=False)
When set to True, reuse the solution of the previous call to fit
and add more estimators to the ensemble, otherwise, just fit
a whole new ensemble.
n_jobs : int, optional (default=1)
The number of jobs to run in parallel for both `fit` and `predict`.
If -1, then the number of jobs is set to the number of cores.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
verbose : int, optional (default=0)
Controls the verbosity of the building process.
Attributes
----------
estimators_ : list of estimators
The collection of fitted sub-estimators.
estimators_samples_ : list of arrays
The subset of drawn samples (i.e., the in-bag samples) for each base
estimator.
estimators_features_ : list of arrays
The subset of drawn features for each base estimator.
oob_score_ : float
Score of the training dataset obtained using an out-of-bag estimate.
oob_prediction_ : array of shape = [n_samples]
Prediction computed with out-of-bag estimate on the training
set. If n_estimators is small it might be possible that a data point
was never left out during the bootstrap. In this case,
`oob_prediction_` might contain NaN.
References
----------
.. [1] L. Breiman, "Pasting small votes for classification in large
databases and on-line", Machine Learning, 36(1), 85-103, 1999.
.. [2] L. Breiman, "Bagging predictors", Machine Learning, 24(2), 123-140,
1996.
.. [3] T. Ho, "The random subspace method for constructing decision
forests", Pattern Analysis and Machine Intelligence, 20(8), 832-844,
1998.
.. [4] G. Louppe and P. Geurts, "Ensembles on Random Patches", Machine
Learning and Knowledge Discovery in Databases, 346-361, 2012.
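
    Examples
    --------
    A minimal usage sketch (illustrative only; the synthetic data below is
    just a stand-in for a real regression problem):

    >>> import numpy as np
    >>> from sklearn.neighbors import KNeighborsRegressor
    >>> from sklearn.ensemble import BaggingRegressor
    >>> rng = np.random.RandomState(0)
    >>> X = rng.rand(100, 4)
    >>> y = X[:, 0] + 2 * X[:, 1] + rng.normal(scale=0.1, size=100)
    >>> reg = BaggingRegressor(KNeighborsRegressor(),
    ...                        max_samples=0.5, max_features=0.5,
    ...                        random_state=0)
    >>> reg = reg.fit(X, y)
    >>> y_pred = reg.predict(X)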
"""
def __init__(self,
base_estimator=None,
n_estimators=10,
max_samples=1.0,
max_features=1.0,
bootstrap=True,
bootstrap_features=False,
oob_score=False,
warm_start=False,
n_jobs=1,
random_state=None,
verbose=0):
super(BaggingRegressor, self).__init__(
base_estimator,
n_estimators=n_estimators,
max_samples=max_samples,
max_features=max_features,
bootstrap=bootstrap,
bootstrap_features=bootstrap_features,
oob_score=oob_score,
warm_start=warm_start,
n_jobs=n_jobs,
random_state=random_state,
verbose=verbose)
def predict(self, X):
"""Predict regression target for X.
The predicted regression target of an input sample is computed as the
mean predicted regression targets of the estimators in the ensemble.
Parameters
----------
X : {array-like, sparse matrix} of shape = [n_samples, n_features]
The training input samples. Sparse matrices are accepted only if
they are supported by the base estimator.
Returns
-------
y : array of shape = [n_samples]
The predicted values.
"""
check_is_fitted(self, "estimators_features_")
# Check data
X = check_array(X, accept_sparse=['csr', 'csc'])
# Parallel loop
n_jobs, n_estimators, starts = _partition_estimators(self.n_estimators,
self.n_jobs)
all_y_hat = Parallel(n_jobs=n_jobs, verbose=self.verbose)(
delayed(_parallel_predict_regression)(
self.estimators_[starts[i]:starts[i + 1]],
self.estimators_features_[starts[i]:starts[i + 1]],
X)
for i in range(n_jobs))
# Reduce
y_hat = sum(all_y_hat) / self.n_estimators
return y_hat
def _validate_estimator(self):
"""Check the estimator and set the base_estimator_ attribute."""
super(BaggingRegressor, self)._validate_estimator(
default=DecisionTreeRegressor())
def _set_oob_score(self, X, y):
n_samples = y.shape[0]
predictions = np.zeros((n_samples,))
n_predictions = np.zeros((n_samples,))
for estimator, samples, features in zip(self.estimators_,
self.estimators_samples_,
self.estimators_features_):
mask = np.ones(n_samples, dtype=np.bool)
mask[samples] = False
predictions[mask] += estimator.predict((X[mask, :])[:, features])
n_predictions[mask] += 1
if (n_predictions == 0).any():
warn("Some inputs do not have OOB scores. "
"This probably means too few estimators were used "
"to compute any reliable oob estimates.")
n_predictions[n_predictions == 0] = 1
predictions /= n_predictions
self.oob_prediction_ = predictions
self.oob_score_ = r2_score(y, predictions)
|
bsd-3-clause
|
albmarvil/The-Eternal-Sorrow
|
dependencies/luabind/boost-build/test/core_dependencies.py
|
4
|
3503
|
#!/usr/bin/python
# Copyright 2003 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)
# This tests correct handling of dependencies, specifically dependencies
# on generated sources and from generated sources.
import BoostBuild
from string import find
t = BoostBuild.Tester(pass_toolset=0)
t.write("core-dependency-helpers", """
rule hdrrule
{
INCLUDES $(1) : $(2) ;
}
actions copy
{
cp $(>) $(<)
}
""")
code = """include core-dependency-helpers ;
DEPENDS all : a ;
DEPENDS a : b ;
actions create-b
{
echo '#include <foo.h>' > $(<)
}
copy a : b ;
create-b b ;
HDRRULE on b foo.h bar.h = hdrrule ;
HDRSCAN on b foo.h bar.h = \"#include <(.*)>\" ;
"""
# This creates 'a' which depends on 'b', which is generated.
# The generated 'b' contains '#include <foo.h>' and no rules for
# foo.h are given. The system should error out on the first invocation.
t.run_build_system("-f-", stdin=code)
t.fail_test(find(t.stdout(), "...skipped a for lack of foo.h...") == -1)
t.rm('b')
# Now test that if target 'c' also depends on 'b', it won't be built either.
t.run_build_system("-f-", stdin=code + " copy c : b ; DEPENDS c : b ; DEPENDS all : c ; ")
t.fail_test(find(t.stdout(), "...skipped c for lack of foo.h...") == -1)
# Now add a rule for creating foo.h
t.rm('b')
code += """
actions create-foo
{
echo // > $(<)
}
create-foo foo.h ;
"""
t.run_build_system("-f-", stdin=code)
# Run two times, adding explicit dependency from all to foo.h at
# the beginning and at the end, to make sure that foo.h is generated before
# 'a' in all cases.
def mk_right_order_func(s1, s2):
def right_order(s):
n1 = find(s, s1)
n2 = find(s, s2)
return n1 != -1 and n2 != -1 and n1 < n2
return right_order
right_order = mk_right_order_func("create-foo", "copy a")
t.rm(["a", "b", "foo.h"])
t.run_build_system("-d+2 -f-", stdin=code + " DEPENDS all : foo.h ;")
t.fail_test(not right_order(t.stdout()))
t.rm(["a", "b", "foo.h"])
t.run_build_system("-d+2 -f-", stdin=" DEPENDS all : foo.h ; " + code)
t.fail_test(not right_order(t.stdout()))
# Now foo.h exists. Test include from b -> foo.h -> bar.h -> biz.h
# b and foo.h already have updating actions.
t.rm(["a", "b"])
t.write("foo.h", "#include <bar.h>")
t.write("bar.h", "#include <biz.h>")
t.run_build_system("-d+2 -f-", stdin=code)
t.fail_test(find(t.stdout(), "...skipped a for lack of biz.h...") == -1)
# Add an action for biz.h
code += """
actions create-biz
{
echo // > $(<)
}
create-biz biz.h ;
"""
t.rm(["b"])
right_order = mk_right_order_func("create-biz", "copy a")
t.run_build_system("-d+2 -f-", stdin=code + " DEPENDS all : biz.h ;")
t.fail_test(not right_order(t.stdout()))
t.rm(["a", "biz.h"])
t.run_build_system("-d+2 -f-", stdin=" DEPENDS all : biz.h ; " + code)
t.fail_test(not right_order(t.stdout()))
t.write("a", "")
code="""
DEPENDS all : main d ;
actions copy
{
cp $(>) $(<) ;
}
DEPENDS main : a ;
copy main : a ;
INCLUDES a : <1>c ;
NOCARE <1>c ;
SEARCH on <1>c = . ;
actions create-c
{
echo d > $(<)
}
actions create-d
{
echo // > $(<)
}
create-c <2>c ;
LOCATE on <2>c = . ;
create-d d ;
HDRSCAN on <1>c = (.*) ;
HDRRULE on <1>c = hdrrule ;
rule hdrrule
{
INCLUDES $(1) : d ;
}
"""
right_order = mk_right_order_func("create-d", "copy main")
t.run_build_system("-d2 -f-", stdin=code)
t.fail_test(not right_order(t.stdout()))
t.cleanup()
|
apache-2.0
|
undoware/neutron-drive
|
neutron-drive/chardet/big5freq.py
|
323
|
82588
|
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) = 2.98
# Random Distribution Ratio = 512/(5401-512) = 0.105
#
# Typical Distribution Ratio is about 25% of the Ideal one, still much higher than RDR
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
#Char to FreqOrder table
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = ( \
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
# Everything below is of no interest for detection purposes
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
|
bsd-3-clause
|
kobiso/ControlledDropout
|
deepnet/mc_avg.py
|
13
|
2226
|
"""Monte Carlo model averaging for dropout networks."""
from neuralnet import *
from trainer import *
import glob
import os
import sys
import random
import numpy as np
def ExtractRepresentations(model_file, train_op_file, layernames,
base_output_dir, memory = '100M', k=10):
LockGPU()
model = util.ReadModel(model_file)
op = ReadOperation(train_op_file)
op.randomize = False
net = CreateDeepnet(model, op, op)
net.LoadModelOnGPU()
net.SetUpData()
for i in range(k):
output_dir = os.path.join(base_output_dir, 'sample_%.5d' % i)
sys.stdout.write('\r Sample %d' % (i+1))
sys.stdout.flush()
net.WriteRepresentationToDisk(layernames, output_dir, memory=memory, drop=True)
sys.stdout.write('\n')
FreeGPU()
def GetAverageResult(truth_file, pred_dir, total, k, avg_over=10):
sample_ids = range(total)
x = []
pred_dict = {}
truth = np.load(truth_file)
for t in range(avg_over):
avg_pred = None
for j in range(k):
i = random.choice(sample_ids)
      # Load each sample's predictions from disk only once and cache them.
      if i not in pred_dict:
        prediction_file = glob.glob(os.path.join(pred_dir, 'sample_%.5d' % i, '*.npy'))[0]
        pred_dict[i] = np.load(prediction_file)
      predictions = pred_dict[i]
      if avg_pred is None:
        # Copy so that the in-place += below does not corrupt the cached array.
        avg_pred = predictions.copy()
      else:
        avg_pred += predictions
avg_pred /= k
pred = avg_pred.argmax(axis=1)
error = len((pred - truth).nonzero()[0])
x.append((100. * error) / len(truth))
x = np.array(x)
return x.mean(), x.std()
def main():
model_file = sys.argv[1]
model = util.ReadModel(model_file)
train_op_file = sys.argv[2]
output_dir = sys.argv[3]
layernames = ['output_layer']
total = 1000
k = 200
avg_over = 100
true_label_file = '/ais/gobi3/u/nitish/mnist/test_labels.npy'
plot_data_file = '/ais/gobi3/u/nitish/mnist/results/mc_avg.npy'
#ExtractRepresentations(model_file, train_op_file, layernames, output_dir, memory='1G', k=total)
out = np.zeros((k, 3))
for l in range(1, k+1):
mean, std = GetAverageResult(true_label_file, output_dir, total, l, avg_over=avg_over)
    print('%d %.4f %.4f' % (l, mean, std))
out[l-1, 0] = l
out[l-1, 1] = mean
out[l-1, 2] = std
np.save(plot_data_file, out)
if __name__ == '__main__':
main()
|
bsd-3-clause
|
gnulinooks/sympy
|
sympy/mpmath/tests/test_ode_mpmath.py
|
6
|
1825
|
from sympy.mpmath.calculus import ODE_step_euler, ODE_step_rk4, odeint, arange
from sympy.mpmath import odefun, cos, sin, mpf, sinc, mp
solvers = [ODE_step_euler, ODE_step_rk4]
def test_ode1():
"""
Let's solve:
        x'' + w**2 * x = 0    (here with w = 1)
    i.e. with x1 = x, x2 = x1' it becomes the first-order system:
        x1' = x2
        x2' = -x1
"""
def derivs((x1, x2), t):
return x2, -x1
for solver in solvers:
t = arange(0, 3.1415926, 0.005)
sol = odeint(derivs, (0., 1.), t, solver)
x1 = [a[0] for a in sol]
x2 = [a[1] for a in sol]
# the result is x1 = sin(t), x2 = cos(t)
# let's just check the end points for t = pi
assert abs(x1[-1]) < 1e-2
assert abs(x2[-1] - (-1)) < 1e-2
def test_ode2():
"""
Let's solve:
x' - x = 0
    i.e. the solution is x = exp(t)
"""
def derivs((x), t):
return x
for solver in solvers:
t = arange(0, 1, 1e-3)
sol = odeint(derivs, (1.,), t, solver)
x = [a[0] for a in sol]
# the result is x = exp(t)
# let's just check the end point for t = 1, i.e. x = e
assert abs(x[-1] - 2.718281828) < 1e-2
def test_odefun_rational():
mp.dps = 15
# A rational function
f = lambda t: 1/(1+mpf(t)**2)
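    # f satisfies f'(t) = -2*t/(1 + t**2)**2 = -2*t*f(t)**2, so the ODE below,
    # y' = -2*x*y**2 with y(0) = f(0) = 1, has f as its exact solution.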
g = odefun(lambda x, y: [-2*x*y[0]**2], 0, [f(0)])
assert f(2).ae(g(2)[0])
def test_odefun_sinc_large():
mp.dps = 15
# Sinc function; test for large x
f = sinc
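    # sinc(x) = sin(x)/x satisfies (x*y)' = cos(x), i.e. y' = (cos(x) - y)/x,
    # which is the ODE integrated below starting from x = 1.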
g = odefun(lambda x, y: [(cos(x)-y[0])/x], 1, [f(1)], tol=0.01, degree=5)
assert abs(f(100) - g(100)[0])/f(100) < 0.01
def test_odefun_harmonic():
mp.dps = 15
# Harmonic oscillator
f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
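    # With y(0) = (1, 0), the system y0' = -y1, y1' = y0 has the exact
    # solution (cos(x), sin(x)), which the assertions below verify.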
for x in [0, 1, 2.5, 8, 3.7]: # we go back to 3.7 to check caching
c, s = f(x)
assert c.ae(cos(x))
assert s.ae(sin(x))
|
bsd-3-clause
|
orgito/ansible
|
test/units/mock/loader.py
|
127
|
3861
|
# (c) 2012-2014, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
from ansible.errors import AnsibleParserError
from ansible.parsing.dataloader import DataLoader
from ansible.module_utils._text import to_bytes, to_text
class DictDataLoader(DataLoader):
def __init__(self, file_mapping=None):
file_mapping = {} if file_mapping is None else file_mapping
assert type(file_mapping) == dict
super(DictDataLoader, self).__init__()
self._file_mapping = file_mapping
self._build_known_directories()
self._vault_secrets = None
def load_from_file(self, path, cache=True, unsafe=False):
path = to_text(path)
if path in self._file_mapping:
return self.load(self._file_mapping[path], path)
return None
# TODO: the real _get_file_contents returns a bytestring, so we actually convert the
# unicode/text it's created with to utf-8
def _get_file_contents(self, path):
path = to_text(path)
if path in self._file_mapping:
return (to_bytes(self._file_mapping[path]), False)
else:
raise AnsibleParserError("file not found: %s" % path)
def path_exists(self, path):
path = to_text(path)
return path in self._file_mapping or path in self._known_directories
def is_file(self, path):
path = to_text(path)
return path in self._file_mapping
def is_directory(self, path):
path = to_text(path)
return path in self._known_directories
def list_directory(self, path):
ret = []
path = to_text(path)
for x in (list(self._file_mapping.keys()) + self._known_directories):
if x.startswith(path):
if os.path.dirname(x) == path:
ret.append(os.path.basename(x))
return ret
def is_executable(self, path):
# FIXME: figure out a way to make paths return true for this
return False
def _add_known_directory(self, directory):
if directory not in self._known_directories:
self._known_directories.append(directory)
def _build_known_directories(self):
self._known_directories = []
for path in self._file_mapping:
dirname = os.path.dirname(path)
while dirname not in ('/', ''):
self._add_known_directory(dirname)
dirname = os.path.dirname(dirname)
def push(self, path, content):
rebuild_dirs = False
if path not in self._file_mapping:
rebuild_dirs = True
self._file_mapping[path] = content
if rebuild_dirs:
self._build_known_directories()
def pop(self, path):
if path in self._file_mapping:
del self._file_mapping[path]
self._build_known_directories()
def clear(self):
self._file_mapping = dict()
self._known_directories = []
def get_basedir(self):
return os.getcwd()
def set_vault_secrets(self, vault_secrets):
self._vault_secrets = vault_secrets
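
# Minimal usage sketch (illustrative only, not part of the original file):
# a unit test can back the loader with an in-memory mapping of paths to YAML
# text instead of touching the filesystem, e.g.
#
#   fake_loader = DictDataLoader({'/roles/x/tasks/main.yml': '- debug: msg=hi'})
#   data = fake_loader.load_from_file('/roles/x/tasks/main.yml')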
|
gpl-3.0
|
fjbatresv/odoo
|
addons/report_intrastat/report_intrastat.py
|
201
|
5606
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.sql import drop_view_if_exists
from openerp.addons.decimal_precision import decimal_precision as dp
class res_country(osv.osv):
_name = 'res.country'
_inherit = 'res.country'
_columns = {
'intrastat': fields.boolean('Intrastat member'),
}
_defaults = {
'intrastat': lambda *a: False,
}
class report_intrastat_code(osv.osv):
_name = "report.intrastat.code"
_description = "Intrastat code"
_translate = False
_columns = {
'name': fields.char('Intrastat Code'),
'description': fields.char('Description'),
}
class product_template(osv.osv):
_name = "product.template"
_inherit = "product.template"
_columns = {
'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code'),
}
class report_intrastat(osv.osv):
_name = "report.intrastat"
_description = "Intrastat report"
_auto = False
_columns = {
'name': fields.char('Year', required=False, readonly=True),
'month':fields.selection([('01','January'), ('02','February'), ('03','March'), ('04','April'), ('05','May'), ('06','June'),
('07','July'), ('08','August'), ('09','September'), ('10','October'), ('11','November'), ('12','December')],'Month', readonly=True),
'supply_units':fields.float('Supply Units', readonly=True),
'ref':fields.char('Source document', readonly=True),
'code': fields.char('Country code', size=2, readonly=True),
'intrastat_id': fields.many2one('report.intrastat.code', 'Intrastat code', readonly=True),
'weight': fields.float('Weight', readonly=True),
'value': fields.float('Value', readonly=True, digits_compute=dp.get_precision('Account')),
'type': fields.selection([('import', 'Import'), ('export', 'Export')], 'Type'),
'currency_id': fields.many2one('res.currency', "Currency", readonly=True),
}
def init(self, cr):
drop_view_if_exists(cr, 'report_intrastat')
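        # The view aggregates open/paid invoice lines per year, month,
        # intrastat code, partner country, invoice number, type and currency;
        # quantities and net weights are converted via the UoM factor when the
        # line UoM belongs to the same category as the product's default UoM.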
cr.execute("""
create or replace view report_intrastat as (
select
to_char(inv.create_date, 'YYYY') as name,
to_char(inv.create_date, 'MM') as month,
min(inv_line.id) as id,
intrastat.id as intrastat_id,
upper(inv_country.code) as code,
sum(case when inv_line.price_unit is not null
then inv_line.price_unit * inv_line.quantity
else 0
end) as value,
sum(
case when uom.category_id != puom.category_id then (pt.weight_net * inv_line.quantity)
else (pt.weight_net * inv_line.quantity * uom.factor) end
) as weight,
sum(
case when uom.category_id != puom.category_id then inv_line.quantity
else (inv_line.quantity * uom.factor) end
) as supply_units,
inv.currency_id as currency_id,
inv.number as ref,
case when inv.type in ('out_invoice','in_refund')
then 'export'
else 'import'
end as type
from
account_invoice inv
left join account_invoice_line inv_line on inv_line.invoice_id=inv.id
left join (product_template pt
left join product_product pp on (pp.product_tmpl_id = pt.id))
on (inv_line.product_id = pp.id)
left join product_uom uom on uom.id=inv_line.uos_id
left join product_uom puom on puom.id = pt.uom_id
left join report_intrastat_code intrastat on pt.intrastat_id = intrastat.id
left join (res_partner inv_address
left join res_country inv_country on (inv_country.id = inv_address.country_id))
on (inv_address.id = inv.partner_id)
where
inv.state in ('open','paid')
and inv_line.product_id is not null
and inv_country.intrastat=true
group by to_char(inv.create_date, 'YYYY'), to_char(inv.create_date, 'MM'),intrastat.id,inv.type,pt.intrastat_id, inv_country.code,inv.number, inv.currency_id
)""")
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
15Dkatz/pants
|
contrib/python/tests/python/pants_test/contrib/python/checks/tasks/checkstyle/test_future_compatibility.py
|
18
|
2005
|
# coding=utf-8
# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from pants_test.contrib.python.checks.tasks.checkstyle.plugin_test_base import \
CheckstylePluginTestBase
from pants.contrib.python.checks.tasks.checkstyle.common import Nit, PythonFile
from pants.contrib.python.checks.tasks.checkstyle.future_compatibility import FutureCompatibility
BAD_CLASS = PythonFile.from_statement("""
class Distiller(object):
CONSTANT = "foo"
def foo(self, value):
return os.path.join(Distiller.CONSTANT, value)
""")
class FutureCompatibilityTest(CheckstylePluginTestBase):
plugin_type = FutureCompatibility
def exemplar_fail(self, code, severity, statement):
self.assertNit(statement, code, severity)
def exemplar_pass(self, statement):
self.assertNoNits(statement)
def test_xrange(self):
self.exemplar_fail('T603', Nit.ERROR, """
for k in range(5):
pass
for k in xrange(10):
pass
""")
self.exemplar_pass("""
for k in obj.xrange(10):
pass
""")
def test_iters(self):
for function_name in FutureCompatibility.BAD_ITERS:
self.exemplar_fail('T602', Nit.ERROR, """
d = {1: 2, 2: 3, 3: 4}
for k in d.%s():
pass
for k in d.values():
pass
""" % function_name)
def test_names(self):
for class_name in FutureCompatibility.BAD_NAMES:
self.exemplar_fail('T604', Nit.ERROR, """
if isinstance(k, %s):
pass
if isinstance(k, str):
pass
""" % class_name)
def test_metaclass(self):
self.exemplar_fail('T605', Nit.WARNING, """
class Singleton(object):
__metaclass__ = SingletonMetaclass
CONSTANT = 2 + 3
def __init__(self):
pass
""")
|
apache-2.0
|
pheanex/ansible-modules-extras
|
system/ohai.py
|
86
|
1695
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2012, Michael DeHaan <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
DOCUMENTATION = '''
---
module: ohai
short_description: Returns inventory data from I(Ohai)
description:
- Similar to the M(facter) module, this runs the I(Ohai) discovery program
(U(http://wiki.opscode.com/display/chef/Ohai)) on the remote host and
returns JSON inventory data.
I(Ohai) data is a bit more verbose and nested than I(facter).
version_added: "0.6"
options: {}
notes: []
requirements: [ "ohai" ]
author:
- "Ansible Core Team"
- "Michael DeHaan (@mpdehaan)"
'''
EXAMPLES = '''
# Retrieve (ohai) data from all Web servers and store in one-file per host
ansible webservers -m ohai --tree=/tmp/ohaidata
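# Playbook-style equivalent (illustrative sketch; the module takes no options):
# - name: collect ohai inventory data
#   ohai:
#   register: ohai_facts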
'''
def main():
module = AnsibleModule(
argument_spec = dict()
)
cmd = ["/usr/bin/env", "ohai"]
rc, out, err = module.run_command(cmd, check_rc=True)
module.exit_json(**json.loads(out))
# import module snippets
from ansible.module_utils.basic import *
main()
|
gpl-3.0
|
jegger/kivy
|
examples/canvas/canvas_stress.py
|
21
|
2146
|
'''
Canvas stress
=============
This example tests the performance of our Graphics engine by drawing large
numbers of small squares. You should see a black canvas with buttons and a
label at the bottom. Pressing the buttons adds small colored squares to the
canvas.
'''
from kivy.uix.button import Button
from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.uix.boxlayout import BoxLayout
from kivy.app import App
from kivy.graphics import Color, Rectangle
from random import random as r
from functools import partial
class StressCanvasApp(App):
def add_rects(self, label, wid, count, *largs):
label.text = str(int(label.text) + count)
with wid.canvas:
for x in range(count):
Color(r(), 1, 1, mode='hsv')
Rectangle(pos=(r() * wid.width + wid.x,
r() * wid.height + wid.y), size=(20, 20))
def double_rects(self, label, wid, *largs):
count = int(label.text)
self.add_rects(label, wid, count, *largs)
def reset_rects(self, label, wid, *largs):
label.text = '0'
wid.canvas.clear()
def build(self):
wid = Widget()
label = Label(text='0')
btn_add100 = Button(text='+ 100 rects',
on_press=partial(self.add_rects, label, wid, 100))
btn_add500 = Button(text='+ 500 rects',
on_press=partial(self.add_rects, label, wid, 500))
btn_double = Button(text='x 2',
on_press=partial(self.double_rects, label, wid))
btn_reset = Button(text='Reset',
on_press=partial(self.reset_rects, label, wid))
layout = BoxLayout(size_hint=(1, None), height=50)
layout.add_widget(btn_add100)
layout.add_widget(btn_add500)
layout.add_widget(btn_double)
layout.add_widget(btn_reset)
layout.add_widget(label)
root = BoxLayout(orientation='vertical')
root.add_widget(wid)
root.add_widget(layout)
return root
if __name__ == '__main__':
StressCanvasApp().run()
|
mit
|
seecr/weightless-core
|
test/udp/acceptortest.py
|
1
|
3124
|
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2013, 2015, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
# Copyright (C) 2015 Koninklijke Bibliotheek (KB) http://www.kb.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
from unittest import TestCase
from seecr.test import CallTrace
from seecr.test.portnumbergenerator import PortNumberGenerator
from socket import socket, AF_INET, SOCK_DGRAM
from subprocess import Popen, PIPE
from weightless.udp import Acceptor
class UdpAcceptorTest(TestCase):
def testStartListening(self):
reactor = CallTrace()
port = PortNumberGenerator.next()
Acceptor(reactor, port, lambda sok: lambda: None)
self.assertEqual('addReader', reactor.calledMethods[0].name)
sok = reactor.calledMethods[0].args[0]
with Popen(['netstat', '--numeric', '--listening', '--udp'], stdout=PIPE, stderr=PIPE) as proc:
out, _ = proc.communicate()
self.assertTrue(str(port) in out.decode(), out.decode())
sok.close()
callback = reactor.calledMethods[0].args[1]
self.assertTrue(callable(callback))
def testHandle(self):
data = []
def sinkFactory(sock):
def handle():
data.append(sock.recvfrom(2048))
return handle
reactor = CallTrace()
port = PortNumberGenerator.next()
Acceptor(reactor, port, sinkFactory)
self.assertEqual('addReader', reactor.calledMethods[0].name)
handleCallback = reactor.calledMethods[0].args[1]
sok = socket(AF_INET, SOCK_DGRAM)
sok.sendto(b"TEST", ('localhost', port))
handleCallback()
contents, remoteAddr = data[0]
self.assertEqual(b"TEST", contents)
sok.sendto(b"ANOTHER TEST", ('localhost', port))
handleCallback()
self.assertEqual(2, len(data))
reactor.calledMethods[0].args[0].close()
sok.close()
def testAcceptorWithPrio(self):
reactor = CallTrace()
port = PortNumberGenerator.next()
acc = Acceptor(reactor, port, lambda sok: None, prio=5)
self.assertEqual('addReader', reactor.calledMethods[0].name)
self.assertEqual(5, reactor.calledMethods[0].kwargs['prio'])
acc.close()
|
gpl-2.0
|
nwjs/blink
|
Tools/Scripts/webkitpy/style/patchreader.py
|
188
|
3699
|
# Copyright (C) 2010 Google Inc. All rights reserved.
# Copyright (C) 2010 Chris Jerdonek ([email protected])
# Copyright (C) 2010 ProFUSION embedded systems
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
from webkitpy.common.checkout.diff_parser import DiffParser
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.filesystem import FileSystem
from webkitpy.common.checkout.scm.detection import SCMDetector
_log = logging.getLogger(__name__)
class PatchReader(object):
"""Supports checking style in patches."""
def __init__(self, text_file_reader):
"""Create a PatchReader instance.
Args:
text_file_reader: A TextFileReader instance.
"""
self._text_file_reader = text_file_reader
def check(self, patch_string, fs=None):
"""Check style in the given patch."""
fs = fs or FileSystem()
patch_files = DiffParser(patch_string.splitlines()).files
# If the user uses git, checking subversion config file only once is enough.
call_only_once = True
for path, diff_file in patch_files.iteritems():
line_numbers = diff_file.added_or_modified_line_numbers()
_log.debug('Found %s new or modified lines in: %s' % (len(line_numbers), path))
if not line_numbers:
match = re.search("\s*png$", path)
if match and fs.exists(path):
if call_only_once:
self._text_file_reader.process_file(file_path=path, line_numbers=None)
cwd = FileSystem().getcwd()
detection = SCMDetector(fs, Executive()).detect_scm_system(cwd)
if detection.display_name() == "git":
call_only_once = False
continue
# Don't check files which contain only deleted lines
# as they can never add style errors. However, mark them as
# processed so that we count up number of such files.
self._text_file_reader.count_delete_only_file()
continue
self._text_file_reader.process_file(file_path=path, line_numbers=line_numbers)
|
bsd-3-clause
|
nextgis-extra/tests
|
lib_gdal/gdrivers/nitf.py
|
1
|
101882
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# $Id: nitf.py 33793 2016-03-26 13:02:07Z goatbar $
#
# Project: GDAL/OGR Test Suite
# Purpose: Test read/write functionality for NITF driver.
# Author: Frank Warmerdam <[email protected]>
#
###############################################################################
# Copyright (c) 2003, Frank Warmerdam <[email protected]>
# Copyright (c) 2008-2013, Even Rouault <even dot rouault at mines-paris dot org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
###############################################################################
import os
import sys
from osgeo import gdal
from osgeo import osr
import array
import struct
import shutil
from sys import version_info
sys.path.append( '../pymod' )
import gdaltest
###############################################################################
# Write/Read test of simple byte reference data.
def nitf_1():
tst = gdaltest.GDALTest( 'NITF', 'byte.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple 16bit reference data.
def nitf_2():
tst = gdaltest.GDALTest( 'NITF', 'int16.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read RGB image with lat/long georeferencing, and verify.
def nitf_3():
tst = gdaltest.GDALTest( 'NITF', 'rgbsmall.tif', 3, 21349 )
return tst.testCreateCopy()
###############################################################################
# Test direct creation of an NITF file.
def nitf_create(creation_options, set_inverted_color_interp = True):
drv = gdal.GetDriverByName( 'NITF' )
try:
os.remove( 'tmp/test_create.ntf' )
except:
pass
ds = drv.Create( 'tmp/test_create.ntf', 200, 100, 3, gdal.GDT_Byte,
creation_options )
ds.SetGeoTransform( (100, 0.1, 0.0, 30.0, 0.0, -0.1 ) )
if set_inverted_color_interp:
ds.GetRasterBand( 1 ).SetRasterColorInterpretation( gdal.GCI_BlueBand )
ds.GetRasterBand( 2 ).SetRasterColorInterpretation( gdal.GCI_GreenBand )
ds.GetRasterBand( 3 ).SetRasterColorInterpretation( gdal.GCI_RedBand )
else:
ds.GetRasterBand( 1 ).SetRasterColorInterpretation( gdal.GCI_RedBand )
ds.GetRasterBand( 2 ).SetRasterColorInterpretation( gdal.GCI_GreenBand )
ds.GetRasterBand( 3 ).SetRasterColorInterpretation( gdal.GCI_BlueBand )
my_list = list(range(200)) + list(range(20,220)) + list(range(30,230))
raw_data = array.array('h',my_list).tostring()
for line in range(100):
ds.WriteRaster( 0, line, 200, 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1,2,3] )
ds = None
return 'success'
###############################################################################
# Test direct creation of a non-compressed NITF file.
def nitf_4():
return nitf_create([ 'ICORDS=G' ])
###############################################################################
# Verify created file
def nitf_check_created_file(checksum1, checksum2, checksum3, set_inverted_color_interp = True):
ds = gdal.Open( 'tmp/test_create.ntf' )
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = checksum1
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 1' )
print(chksum, chksum_expect)
return 'fail'
chksum = ds.GetRasterBand(2).Checksum()
chksum_expect = checksum2
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 2' )
print(chksum, chksum_expect)
return 'fail'
chksum = ds.GetRasterBand(3).Checksum()
chksum_expect = checksum3
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 3' )
print(chksum, chksum_expect)
return 'fail'
geotransform = ds.GetGeoTransform()
if abs(geotransform[0]-100) > 0.1 \
or abs(geotransform[1]-0.1) > 0.001 \
or abs(geotransform[2]-0) > 0.001 \
or abs(geotransform[3]-30.0) > 0.1 \
or abs(geotransform[4]-0) > 0.001 \
or abs(geotransform[5]- -0.1) > 0.001:
print(geotransform)
gdaltest.post_reason( 'geotransform differs from expected' )
return 'fail'
if set_inverted_color_interp:
if ds.GetRasterBand(1).GetRasterColorInterpretation() != gdal.GCI_BlueBand:
gdaltest.post_reason( 'Got wrong color interpretation.' )
return 'fail'
if ds.GetRasterBand(2).GetRasterColorInterpretation() !=gdal.GCI_GreenBand:
gdaltest.post_reason( 'Got wrong color interpretation.' )
return 'fail'
if ds.GetRasterBand(3).GetRasterColorInterpretation() != gdal.GCI_RedBand:
gdaltest.post_reason( 'Got wrong color interpretation.' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Verify file created by nitf_4()
def nitf_5():
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Read existing NITF file. Verifies the new adjusted IGEOLO interp.
def nitf_6():
tst = gdaltest.GDALTest( 'NITF', 'rgb.ntf', 3, 21349 )
return tst.testOpen( check_prj = 'WGS84',
check_gt = (-44.842029478458, 0.003503401360, 0,
-22.930748299319, 0, -0.003503401360) )
###############################################################################
# NITF in-memory.
def nitf_7():
tst = gdaltest.GDALTest( 'NITF', 'rgbsmall.tif', 3, 21349 )
return tst.testCreateCopy( vsimem = 1 )
###############################################################################
# Verify we can open an NSIF file, and get metadata including BLOCKA.
def nitf_8():
ds = gdal.Open( 'data/fake_nsif.ntf' )
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 12033
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 1' )
print(chksum, chksum_expect)
return 'fail'
md = ds.GetMetadata()
if md['NITF_FHDR'] != 'NSIF01.00':
gdaltest.post_reason( 'Got wrong FHDR value' )
return 'fail'
if md['NITF_BLOCKA_BLOCK_INSTANCE_01'] != '01' \
or md['NITF_BLOCKA_BLOCK_COUNT'] != '01' \
or md['NITF_BLOCKA_N_GRAY_01'] != '00000' \
or md['NITF_BLOCKA_L_LINES_01'] != '01000' \
or md['NITF_BLOCKA_LAYOVER_ANGLE_01'] != '000' \
or md['NITF_BLOCKA_SHADOW_ANGLE_01'] != '000' \
or md['NITF_BLOCKA_FRLC_LOC_01'] != '+41.319331+020.078400' \
or md['NITF_BLOCKA_LRLC_LOC_01'] != '+41.317083+020.126072' \
or md['NITF_BLOCKA_LRFC_LOC_01'] != '+41.281634+020.122570' \
or md['NITF_BLOCKA_FRFC_LOC_01'] != '+41.283881+020.074924':
gdaltest.post_reason( 'BLOCKA metadata has unexpected value.' )
return 'fail'
return 'success'
###############################################################################
# Create and read a JPEG encoded NITF file.
def nitf_9():
src_ds = gdal.Open( 'data/rgbsmall.tif' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf9.ntf', src_ds,
options = ['IC=C3'] )
src_ds = None
ds = None
ds = gdal.Open( 'tmp/nitf9.ntf' )
(exp_mean, exp_stddev) = (65.9532, 46.9026375565)
(mean, stddev) = ds.GetRasterBand(1).ComputeBandStats()
if abs(exp_mean-mean) > 0.1 or abs(exp_stddev-stddev) > 0.1:
print(mean, stddev)
gdaltest.post_reason( 'did not get expected mean or standard dev.' )
return 'fail'
md = ds.GetMetadata('IMAGE_STRUCTURE')
if md['COMPRESSION'] != 'JPEG':
gdaltest.post_reason( 'Did not get expected compression value.' )
return 'fail'
return 'success'
###############################################################################
# For esoteric reasons, createcopy from jpeg compressed nitf files can be
# tricky. Verify this is working.
def nitf_10():
src_ds = gdal.Open('tmp/nitf9.ntf')
expected_cs = src_ds.GetRasterBand(2).Checksum()
src_ds = None
if expected_cs != 22296 and expected_cs != 22259:
gdaltest.post_reason( 'fail' )
return 'fail'
tst = gdaltest.GDALTest( 'NITF', '../tmp/nitf9.ntf', 2, expected_cs )
return tst.testCreateCopy()
###############################################################################
# Test 1bit file ... conveniently very small and easy to include! (#1854)
def nitf_11():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/i_3034c.ntf
tst = gdaltest.GDALTest( 'NITF', 'i_3034c.ntf', 1, 170 )
return tst.testOpen()
###############################################################################
# Verify that TRE and CGM access via the metadata domain works.
def nitf_12():
ds = gdal.Open( 'data/fake_nsif.ntf' )
mdTRE = ds.GetMetadata( 'TRE' )
try: # NG bindings
blockA = ds.GetMetadataItem( 'BLOCKA', 'TRE' )
except:
blockA = mdTRE['BLOCKA']
mdCGM = ds.GetMetadata( 'CGM' )
try: # NG bindings
segmentCount = ds.GetMetadataItem( 'SEGMENT_COUNT', 'CGM' )
except:
segmentCount = mdCGM['SEGMENT_COUNT']
ds = None
expectedBlockA = '010000001000000000 +41.319331+020.078400+41.317083+020.126072+41.281634+020.122570+41.283881+020.074924 '
if mdTRE['BLOCKA'] != expectedBlockA:
gdaltest.post_reason( 'did not find expected BLOCKA from metadata.' )
return 'fail'
if blockA != expectedBlockA:
gdaltest.post_reason( 'did not find expected BLOCKA from metadata item.' )
return 'fail'
if mdCGM['SEGMENT_COUNT'] != '0':
gdaltest.post_reason( 'did not find expected SEGMENT_COUNT from metadata.' )
return 'fail'
if segmentCount != '0':
gdaltest.post_reason( 'did not find expected SEGMENT_COUNT from metadata item.' )
return 'fail'
return 'success'
###############################################################################
# Test creation of an NITF file in UTM Zone 11, Southern Hemisphere.
def nitf_13():
drv = gdal.GetDriverByName( 'NITF' )
ds = drv.Create( 'tmp/test_13.ntf', 200, 100, 1, gdal.GDT_Byte,
[ 'ICORDS=S' ] )
ds.SetGeoTransform( (400000, 10, 0.0, 6000000, 0.0, -10 ) )
ds.SetProjection('PROJCS["UTM Zone 11, Southern Hemisphere",GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],TOWGS84[0,0,0,0,0,0,0],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9108"]],AUTHORITY["EPSG","4326"]],PROJECTION["Transverse_Mercator"],PARAMETER["latitude_of_origin",0],PARAMETER["central_meridian",-117],PARAMETER["scale_factor",0.9996],PARAMETER["false_easting",500000],PARAMETER["false_northing",10000000],UNIT["Meter",1]]')
my_list = list(range(200))
raw_data = array.array('f',my_list).tostring()
for line in range(100):
ds.WriteRaster( 0, line, 200, 1, raw_data,
buf_type = gdal.GDT_Int16,
band_list = [1] )
ds = None
return 'success'
###############################################################################
# Verify the file created by the previous test
def nitf_14():
ds = gdal.Open( 'tmp/test_13.ntf' )
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 55964
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 1' )
print(chksum, chksum_expect)
return 'fail'
geotransform = ds.GetGeoTransform()
if abs(geotransform[0]-400000) > .1 \
or abs(geotransform[1]-10) > 0.001 \
or abs(geotransform[2]-0) > 0.001 \
or abs(geotransform[3]-6000000) > .1 \
or abs(geotransform[4]-0) > 0.001 \
or abs(geotransform[5]- -10) > 0.001:
print(geotransform)
gdaltest.post_reason( 'geotransform differs from expected' )
return 'fail'
prj = ds.GetProjectionRef()
if prj.find('UTM Zone 11, Southern Hemisphere') == -1:
print(prj)
gdaltest.post_reason( 'Coordinate system not UTM Zone 11, Southern Hemisphere' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Test creating an in memory copy.
def nitf_15():
tst = gdaltest.GDALTest( 'NITF', 'byte.tif', 1, 4672 )
return tst.testCreateCopy( vsimem = 1 )
###############################################################################
# Check a 1-bit mono image with a mask table: black (0x00) pad pixels are transparent; the image shows a white arrow.
def nitf_16():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3034d.nsf
tst = gdaltest.GDALTest( 'NITF', 'ns3034d.nsf', 1, 170 )
return tst.testOpen()
###############################################################################
# Check a 1-bit RGB/LUT image (green arrow) with a mask table (pad pixels have value 0x00);
# the transparent pixel value of 1 is mapped to green by the LUT.
def nitf_17():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/i_3034f.ntf
tst = gdaltest.GDALTest( 'NITF', 'i_3034f.ntf', 1, 170 )
return tst.testOpen()
###############################################################################
# Test NITF file without image segment
def nitf_18():
# Shut up the warning about missing image segment
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0006A.NTF
ds = gdal.Open("data/U_0006A.NTF")
gdal.PopErrorHandler()
if ds.RasterCount != 0:
return 'fail'
return 'success'
###############################################################################
# Test BILEVEL (C1) decompression
def nitf_19():
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_0/U_1050A.NTF
tst = gdaltest.GDALTest( 'NITF', 'U_1050A.NTF', 1, 65024 )
return tst.testOpen()
###############################################################################
# Test NITF file consisting only of a header
def nitf_20():
# Shut up the warning about file either corrupt or empty
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
# From http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0002A.NTF
ds = gdal.Open("data/U_0002A.NTF")
gdal.PopErrorHandler()
if ds is not None:
return 'fail'
return 'success'
###############################################################################
# Verify that TEXT access via the metadata domain works.
#
# See also nitf_35 for writing TEXT segments.
def nitf_21():
# Shut up the warning about missing image segment
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open( 'data/ns3114a.nsf' )
gdal.PopErrorHandler()
mdTEXT = ds.GetMetadata( 'TEXT' )
try: # NG bindings
data0 = ds.GetMetadataItem( 'DATA_0', 'TEXT' )
except:
data0 = mdTEXT['DATA_0']
ds = None
if mdTEXT['DATA_0'] != 'A':
gdaltest.post_reason( 'did not find expected DATA_0 from metadata.' )
return 'fail'
if data0 != 'A':
gdaltest.post_reason( 'did not find expected DATA_0 from metadata item.' )
return 'fail'
return 'success'
###############################################################################
# Write/Read test of simple int32 reference data.
def nitf_22():
tst = gdaltest.GDALTest( 'NITF', '../../gcore/data/int32.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple float32 reference data.
def nitf_23():
tst = gdaltest.GDALTest( 'NITF', '../../gcore/data/float32.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple float64 reference data.
def nitf_24():
tst = gdaltest.GDALTest( 'NITF', '../../gcore/data/float64.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple uint16 reference data.
def nitf_25():
tst = gdaltest.GDALTest( 'NITF', '../../gcore/data/uint16.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Write/Read test of simple uint32 reference data.
def nitf_26():
tst = gdaltest.GDALTest( 'NITF', '../../gcore/data/uint32.tif', 1, 4672 )
return tst.testCreateCopy()
###############################################################################
# Test Create() with IC=NC compression, and multi-blocks
def nitf_27():
if nitf_create([ 'ICORDS=G', 'IC=NC', 'BLOCKXSIZE=10', 'BLOCKYSIZE=10' ]) != 'success':
return 'fail'
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test Create() with IC=C8 compression with the JP2ECW driver
def nitf_28_jp2ecw():
gdaltest.nitf_28_jp2ecw_is_ok = False
import ecw
if not ecw.has_write_support():
return 'skip'
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2ECW')
if nitf_create([ 'ICORDS=G', 'IC=C8', 'TARGET=75' ], set_inverted_color_interp = False) == 'success':
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp = False)
if ret == 'success':
gdaltest.nitf_28_jp2ecw_is_ok = True
else:
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test reading the previously created file with the JP2MrSID driver
# (The NITF driver only looks for the JP2ECW driver when creating IC=C8 NITF files,
# but allows any GDAL driver to open the JP2 stream inside it)
def nitf_28_jp2mrsid():
if not gdaltest.nitf_28_jp2ecw_is_ok:
return 'skip'
try:
jp2mrsid_drv = gdal.GetDriverByName( 'JP2MrSID' )
except:
jp2mrsid_drv = None
if jp2mrsid_drv is None:
return 'skip'
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2MrSID')
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp = False)
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test reading the previously created file with the JP2KAK driver
# (The NITF driver only looks for the JP2ECW driver when creating IC=C8 NITF files,
# but allows any GDAL driver to open the JP2 stream inside it)
#
# Note: I (E. Rouault) haven't been able to check that this test actually works.
def nitf_28_jp2kak():
if not gdaltest.nitf_28_jp2ecw_is_ok:
return 'skip'
try:
jp2kak_drv = gdal.GetDriverByName( 'JP2KAK' )
except:
jp2kak_drv = None
if jp2kak_drv is None:
return 'skip'
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but('JP2KAK')
ret = nitf_check_created_file(32398, 42502, 38882, set_inverted_color_interp = False)
gdaltest.reregister_all_jpeg2000_drivers()
return ret
###############################################################################
# Test Create() with a LUT
def nitf_29():
drv = gdal.GetDriverByName( 'NITF' )
ds = drv.Create( 'tmp/test_29.ntf', 1, 1, 1, gdal.GDT_Byte,
[ 'IREP=RGB/LUT', 'LUT_SIZE=128' ] )
ct = gdal.ColorTable()
ct.SetColorEntry( 0, (255,255,255,255) )
ct.SetColorEntry( 1, (255,255,0,255) )
ct.SetColorEntry( 2, (255,0,255,255) )
ct.SetColorEntry( 3, (0,255,255,255) )
ds.GetRasterBand( 1 ).SetRasterColorTable( ct )
ds = None
ds = gdal.Open( 'tmp/test_29.ntf' )
ct = ds.GetRasterBand( 1 ).GetRasterColorTable()
if ct.GetCount() != 129 or \
ct.GetColorEntry(0) != (255,255,255,255) or \
ct.GetColorEntry(1) != (255,255,0,255) or \
ct.GetColorEntry(2) != (255,0,255,255) or \
ct.GetColorEntry(3) != (0,255,255,255):
gdaltest.post_reason( 'Wrong color table entry.' )
return 'fail'
new_ds = drv.CreateCopy( 'tmp/test_29_copy.ntf', ds )
del new_ds
ds = None
ds = gdal.Open( 'tmp/test_29_copy.ntf' )
ct = ds.GetRasterBand( 1 ).GetRasterColorTable()
if ct.GetCount() != 130 or \
ct.GetColorEntry(0) != (255,255,255,255) or \
ct.GetColorEntry(1) != (255,255,0,255) or \
ct.GetColorEntry(2) != (255,0,255,255) or \
ct.GetColorEntry(3) != (0,255,255,255):
gdaltest.post_reason( 'Wrong color table entry.' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Verify we can write a file with BLOCKA TRE and read it back properly.
def nitf_30():
src_ds = gdal.Open( 'data/fake_nsif.ntf' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf30.ntf', src_ds )
src_ds = None
chksum = ds.GetRasterBand(1).Checksum()
chksum_expect = 12033
if chksum != chksum_expect:
gdaltest.post_reason( 'Did not get expected chksum for band 1' )
print(chksum, chksum_expect)
return 'fail'
md = ds.GetMetadata()
if md['NITF_FHDR'] != 'NSIF01.00':
gdaltest.post_reason( 'Got wrong FHDR value' )
return 'fail'
if md['NITF_BLOCKA_BLOCK_INSTANCE_01'] != '01' \
or md['NITF_BLOCKA_BLOCK_COUNT'] != '01' \
or md['NITF_BLOCKA_N_GRAY_01'] != '00000' \
or md['NITF_BLOCKA_L_LINES_01'] != '01000' \
or md['NITF_BLOCKA_LAYOVER_ANGLE_01'] != '000' \
or md['NITF_BLOCKA_SHADOW_ANGLE_01'] != '000' \
or md['NITF_BLOCKA_FRLC_LOC_01'] != '+41.319331+020.078400' \
or md['NITF_BLOCKA_LRLC_LOC_01'] != '+41.317083+020.126072' \
or md['NITF_BLOCKA_LRFC_LOC_01'] != '+41.281634+020.122570' \
or md['NITF_BLOCKA_FRFC_LOC_01'] != '+41.283881+020.074924':
gdaltest.post_reason( 'BLOCKA metadata has unexpected value.' )
return 'fail'
ds = None
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf30.ntf' )
return 'success'
###############################################################################
# Verify we can write a file with a custom TRE and read it back properly.
def nitf_31():
if nitf_create( [ 'TRE=CUSTOM= Test TRE1\\0MORE',
'TRE=TOTEST=SecondTRE',
'ICORDS=G' ] ) != 'success':
return 'fail'
ds = gdal.Open( 'tmp/test_create.ntf' )
md = ds.GetMetadata( 'TRE' )
if len(md) != 2:
gdaltest.post_reason( 'Did not get expected TRE count' )
print(md)
return 'fail'
# Check that the leading space in the CUSTOM metadata item is preserved (#3088, #3204)
try:
if ds.GetMetadataItem( 'CUSTOM', 'TRE') != ' Test TRE1\\0MORE':
gdaltest.post_reason( 'Did not get expected TRE contents' )
print(ds.GetMetadataItem( 'CUSTOM', 'TRE'))
return 'fail'
except:
pass
if md['CUSTOM'] != ' Test TRE1\\0MORE' \
or md['TOTEST'] != 'SecondTRE':
gdaltest.post_reason( 'Did not get expected TRE contents' )
print(md)
return 'fail'
ds = None
return nitf_check_created_file( 32498, 42602, 38982 )
###############################################################################
# Test Create() with ICORDS=D
def nitf_32():
if nitf_create([ 'ICORDS=D' ]) != 'success':
return 'fail'
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test Create() with ICORDS=D and a consistent BLOCKA
def nitf_33():
if nitf_create([ 'ICORDS=D',
'BLOCKA_BLOCK_COUNT=01',
'BLOCKA_BLOCK_INSTANCE_01=01',
'BLOCKA_L_LINES_01=100',
'BLOCKA_FRLC_LOC_01=+29.950000+119.950000',
'BLOCKA_LRLC_LOC_01=+20.050000+119.950000',
'BLOCKA_LRFC_LOC_01=+20.050000+100.050000',
'BLOCKA_FRFC_LOC_01=+29.950000+100.050000' ]) != 'success':
return 'fail'
return nitf_check_created_file(32498, 42602, 38982)
###############################################################################
# Test CreateCopy() of a 16bit image with tiling
def nitf_34():
tst = gdaltest.GDALTest( 'NITF', 'n43.dt0', 1, 49187, options = [ 'BLOCKSIZE=64' ] )
return tst.testCreateCopy( )
###############################################################################
# Test CreateCopy() writing file with a text segment.
def nitf_35():
src_ds = gdal.Open( 'data/text_md.vrt' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf_35.ntf', src_ds )
src_ds = None
ds = None
ds = gdal.Open( 'tmp/nitf_35.ntf' )
exp_text = """This is text data
with a newline."""
md = ds.GetMetadata('TEXT')
if md['DATA_0'] != exp_text:
gdaltest.post_reason( 'Did not get expected TEXT metadata.' )
print(md)
return 'fail'
exp_text = """Also, a second text segment is created."""
md = ds.GetMetadata('TEXT')
if md['DATA_1'] != exp_text:
gdaltest.post_reason( 'Did not get expected TEXT metadata.' )
print(md)
return 'fail'
ds = None
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf_35.ntf' )
return 'success'
###############################################################################
# Create and read a JPEG encoded NITF file (C3) with several blocks
# Check that statistics are persisted (#3985)
def nitf_36():
src_ds = gdal.Open( 'data/rgbsmall.tif' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf36.ntf', src_ds,
options = ['IC=C3', 'BLOCKSIZE=32', 'QUALITY=100'] )
src_ds = None
ds = None
ds = gdal.Open( 'tmp/nitf36.ntf' )
if ds.GetRasterBand(1).GetMinimum() is not None:
gdaltest.post_reason( 'Did not expect to have minimum value at that point.' )
return 'fail'
(minval, maxval, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, False)
if stddev >= 0:
gdaltest.post_reason( 'Did not expect to have statistics at that point.' )
return 'fail'
(exp_mean, exp_stddev) = (65.4208, 47.254550335)
(minval, maxval, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, True)
if abs(exp_mean-mean) > 0.1 or abs(exp_stddev-stddev) > 0.1:
print(mean, stddev)
gdaltest.post_reason( 'did not get expected mean or standard dev.' )
return 'fail'
md = ds.GetMetadata('IMAGE_STRUCTURE')
if md['COMPRESSION'] != 'JPEG':
gdaltest.post_reason( 'Did not get expected compression value.' )
return 'fail'
ds = None
# Check that statistics are persisted (#3985)
ds = gdal.Open( 'tmp/nitf36.ntf' )
if ds.GetRasterBand(1).GetMinimum() is None:
gdaltest.post_reason( 'Should have minimum value at that point.' )
return 'fail'
(minval, maxval, mean, stddev) = ds.GetRasterBand(1).GetStatistics(False, False)
if abs(exp_mean-mean) > 0.1 or abs(exp_stddev-stddev) > 0.1:
print(mean, stddev)
gdaltest.post_reason( 'Should have statistics at that point.' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Create and read a NITF file with 69999 bands
def nitf_37():
try:
if int(gdal.VersionInfo('VERSION_NUM')) < 1700:
return 'skip'
except:
# OG-python bindings don't have gdal.VersionInfo. Too bad, but let's hope that GDAL's version isn't too old !
pass
ds = gdal.GetDriverByName('NITF').Create( 'tmp/nitf37.ntf', 1, 1, 69999)
ds = None
ds = gdal.Open( 'tmp/nitf37.ntf' )
if ds.RasterCount != 69999:
return 'fail'
ds = None
return 'success'
###############################################################################
# Create and read a NITF file with 999 images
def nitf_38():
ds = gdal.Open('data/byte.tif')
nXSize = ds.RasterXSize
nYSize = ds.RasterYSize
data = ds.GetRasterBand(1).ReadRaster(0, 0, nXSize, nYSize)
expected_cs = ds.GetRasterBand(1).Checksum()
ds = gdal.GetDriverByName('NITF').Create( 'tmp/nitf38.ntf', nXSize, nYSize, 1, options = [ 'NUMI=999' ])
ds = None
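    # Individual image segments are addressed with the 'NITF_IM:<index>:<filename>'
    # subdataset syntax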
ds = gdal.Open('NITF_IM:998:tmp/nitf38.ntf', gdal.GA_Update)
ds.GetRasterBand(1).WriteRaster(0, 0, nXSize, nYSize, data)
# Create overviews
ds.BuildOverviews( overviewlist = [2] )
ds = None
ds = gdal.Open( 'NITF_IM:0:tmp/nitf38.ntf' )
if ds.GetRasterBand(1).Checksum() != 0:
return 'fail'
ds = None
ds = gdal.Open( 'NITF_IM:998:tmp/nitf38.ntf' )
    cs = ds.GetRasterBand(1).Checksum()
if cs != expected_cs:
print(cs)
gdaltest.post_reason( 'bad checksum for image of 998th subdataset' )
return 'fail'
# Check the overview
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
if cs != 1087:
print(cs)
gdaltest.post_reason( 'bad checksum for overview of image of 998th subdataset' )
return 'fail'
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/nitf38.vrt', ds)
out_ds = None
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
if cs != expected_cs:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
ds = gdal.Open( 'NITF_IM:998:%s/tmp/nitf38.ntf' % os.getcwd() )
out_ds = gdal.GetDriverByName('VRT').CreateCopy('%s/tmp/nitf38.vrt' % os.getcwd(), ds)
out_ds = None
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
if cs != expected_cs:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
ds = gdal.Open( 'NITF_IM:998:%s/tmp/nitf38.ntf' % os.getcwd() )
out_ds = gdal.GetDriverByName('VRT').CreateCopy('tmp/nitf38.vrt', ds)
del out_ds
ds = None
ds = gdal.Open('tmp/nitf38.vrt')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('tmp/nitf38.vrt')
if cs != expected_cs:
gdaltest.post_reason('failure')
print(cs)
return 'fail'
return 'success'
###############################################################################
# Create and read a JPEG encoded NITF file (M3) with several blocks
def nitf_39():
src_ds = gdal.Open( 'data/rgbsmall.tif' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf39.ntf', src_ds,
options = ['IC=M3', 'BLOCKSIZE=32', 'QUALITY=100'] )
src_ds = None
ds = None
ds = gdal.Open( 'tmp/nitf39.ntf' )
(exp_mean, exp_stddev) = (65.4208, 47.254550335)
(mean, stddev) = ds.GetRasterBand(1).ComputeBandStats()
if abs(exp_mean-mean) > 0.1 or abs(exp_stddev-stddev) > 0.1:
print(mean, stddev)
gdaltest.post_reason( 'did not get expected mean or standard dev.' )
return 'fail'
md = ds.GetMetadata('IMAGE_STRUCTURE')
if md['COMPRESSION'] != 'JPEG':
gdaltest.post_reason( 'Did not get expected compression value.' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Create a 10 GB NITF file
def nitf_40():
    # Determine if the filesystem supports sparse files (we don't want to create a real
    # 10 GB file!)
if (gdaltest.filesystem_supports_sparse_files('tmp') == False):
return 'skip'
width = 99000
height = 99000
x = width - 1
y = height - 1
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf40.ntf', width, height, options = ['BLOCKSIZE=256'])
data = struct.pack('B' * 1, 123)
# Write a non NULL byte at the bottom right corner of the image (around 10 GB offset)
ds.GetRasterBand(1).WriteRaster(x, y, 1, 1, data)
ds = None
# Check that we can fetch it at the right value
ds = gdal.Open('tmp/nitf40.ntf')
if ds.GetRasterBand(1).ReadRaster(x, y, 1, 1) != data:
return 'fail'
ds = None
# Check that it is indeed at a very far offset, and that the NITF driver
# has not put it somewhere else due to involuntary cast to 32bit integer.
blockWidth = 256
blockHeight = 256
nBlockx = int((width+blockWidth-1)/blockWidth)
iBlockx = int(x / blockWidth)
iBlocky = int(y / blockHeight)
ix = x % blockWidth
iy = y % blockHeight
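    # Expected file offset of the patched pixel: the image data of this file starts at
    # offset 843, blocks are stored sequentially in row-major block order (1 byte per
    # pixel), and pixels are row-major within each block.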
offset = 843 + (iBlocky * nBlockx + iBlockx) * blockWidth * blockHeight + (iy * blockWidth + ix)
try:
os.SEEK_SET
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
fd = open('tmp/nitf40.ntf', 'rb')
fd.seek(offset, os.SEEK_SET)
bytes_read = fd.read(1)
fd.close()
val = struct.unpack('B' * 1, bytes_read)[0]
if val != 123:
gdaltest.post_reason('Bad value at offset %d : %d' % (offset, val))
return 'fail'
return 'success'
###############################################################################
# Check reading a 12-bit JPEG compressed NITF
def nitf_41():
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
return 'skip'
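    # Remove any cached PAM statistics (.aux.xml) so that GetStatistics() recomputes them
    # from the pixel data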
try:
os.remove('data/U_4017A.NTF.aux.xml')
except:
pass
ds = gdal.Open('data/U_4017A.NTF')
if ds.GetRasterBand(1).DataType != gdal.GDT_UInt16:
return 'fail'
stats = ds.GetRasterBand(1).GetStatistics( 0, 1 )
if stats[2] < 2385 or stats[2] > 2386:
print(stats)
return 'fail'
ds = None
try:
os.remove('data/U_4017A.NTF.aux.xml')
except:
pass
return 'success'
###############################################################################
# Check creating a 12-bit JPEG compressed NITF
def nitf_42():
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
return 'skip'
ds = gdal.Open('data/U_4017A.NTF')
out_ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf42.ntf', ds, options = ['IC=C3', 'FHDR=NITF02.10'])
del out_ds
ds = gdal.Open('tmp/nitf42.ntf')
if ds.GetRasterBand(1).DataType != gdal.GDT_UInt16:
return 'fail'
stats = ds.GetRasterBand(1).GetStatistics( 0, 1 )
if stats[2] < 2385 or stats[2] > 2386:
print(stats)
return 'fail'
ds = None
return 'success'
###############################################################################
# Test CreateCopy() in IC=C8 with various JPEG2000 drivers
def nitf_43(driver_to_test, options):
try:
jp2_drv = gdal.GetDriverByName( driver_to_test )
if driver_to_test == 'JP2ECW' and jp2_drv is not None:
if 'DMD_CREATIONOPTIONLIST' not in jp2_drv.GetMetadata():
jp2_drv = None
except:
jp2_drv = None
if jp2_drv is None:
return 'skip'
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('data/byte.tif')
gdal.PushErrorHandler('CPLQuietErrorHandler')
out_ds = gdal.GetDriverByName('NITF').CreateCopy('tmp/nitf_43.ntf', ds, options = options, strict=0)
gdal.PopErrorHandler()
out_ds = None
out_ds = gdal.Open('tmp/nitf_43.ntf')
if out_ds.GetRasterBand(1).Checksum() == 4672:
ret = 'success'
else:
ret = 'fail'
out_ds = None
gdal.GetDriverByName('NITF').Delete('tmp/nitf_43.ntf')
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_43_jasper():
return nitf_43('JPEG2000', ['IC=C8'])
def nitf_43_jp2ecw():
import ecw
if not ecw.has_write_support():
return 'skip'
return nitf_43('JP2ECW', ['IC=C8', 'TARGET=0'])
def nitf_43_jp2kak():
return nitf_43('JP2KAK', ['IC=C8', 'QUALITY=100'])
###############################################################################
# Check creating a monoblock 10000x1 image (ticket #3263)
def nitf_44():
out_ds = gdal.GetDriverByName('NITF').Create('tmp/nitf44.ntf', 10000, 1)
out_ds.GetRasterBand(1).Fill(255)
out_ds = None
ds = gdal.Open('tmp/nitf44.ntf')
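    # GetBlockSize() is not available in all bindings versions, hence the guard below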
if 'GetBlockSize' in dir(gdal.Band):
(blockx, blocky) = ds.GetRasterBand(1).GetBlockSize()
if blockx != 10000:
return 'fail'
if ds.GetRasterBand(1).Checksum() != 57182:
return 'fail'
ds = None
return 'success'
###############################################################################
# Check overviews on a JPEG compressed subdataset
def nitf_45():
try:
os.remove('tmp/nitf45.ntf.aux.xml')
except:
pass
shutil.copyfile( 'data/two_images_jpeg.ntf', 'tmp/nitf45.ntf' )
ds = gdal.Open( 'NITF_IM:1:tmp/nitf45.ntf', gdal.GA_Update )
ds.BuildOverviews( overviewlist = [2] )
# FIXME ? ds.GetRasterBand(1).GetOverview(0) is None until we reopen
ds = None
ds = gdal.Open( 'NITF_IM:1:tmp/nitf45.ntf' )
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
if cs != 1086:
print(cs)
gdaltest.post_reason('did not get expected checksum for overview of subdataset')
return 'fail'
ds = None
return 'success'
###############################################################################
# Check overviews on a JPEG2000 compressed subdataset
def nitf_46(driver_to_test):
try:
jp2_drv = gdal.GetDriverByName( driver_to_test )
except:
jp2_drv = None
if jp2_drv is None:
return 'skip'
# Deregister other potential conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
try:
os.remove('tmp/nitf46.ntf.aux.xml')
except:
pass
try:
os.remove('tmp/nitf46.ntf_0.ovr')
except:
pass
shutil.copyfile( 'data/two_images_jp2.ntf', 'tmp/nitf46.ntf' )
ds = gdal.Open( 'NITF_IM:1:tmp/nitf46.ntf', gdal.GA_Update )
ds.BuildOverviews( overviewlist = [2] )
# FIXME ? ds.GetRasterBand(1).GetOverview(0) is None until we reopen
ds = None
ds = gdal.Open( 'NITF_IM:1:tmp/nitf46.ntf' )
if ds.GetRasterBand(1).GetOverview(0) is None:
gdaltest.post_reason('no overview of subdataset')
ret = 'fail'
else:
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
if cs != 1086:
print(cs)
gdaltest.post_reason('did not get expected checksum for overview of subdataset')
ret = 'fail'
else:
ret = 'success'
ds = None
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_46_jp2ecw():
return nitf_46('JP2ECW')
def nitf_46_jp2mrsid():
return nitf_46('JP2MrSID')
def nitf_46_jp2kak():
return nitf_46('JP2KAK')
def nitf_46_jasper():
return nitf_46('JPEG2000')
def nitf_46_openjpeg():
return nitf_46('JP2OpenJPEG')
###############################################################################
# Check reading of rsets.
def nitf_47():
ds = gdal.Open( 'data/rset.ntf.r0' )
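    # The accompanying .r1 and .r2 reduced-resolution set (RSET) files should be exposed
    # as overviews of the .r0 file.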
band = ds.GetRasterBand(2)
if band.GetOverviewCount() != 2:
gdaltest.post_reason( 'did not get the expected number of rset overviews.' )
return 'fail'
cs = band.GetOverview(1).Checksum()
if cs != 1297:
print(cs)
gdaltest.post_reason('did not get expected checksum for overview of subdataset')
return 'fail'
ds = None
return 'success'
###############################################################################
# Check building of standard overviews in place of rset overviews.
def nitf_48():
try:
os.remove('tmp/rset.ntf.r0')
os.remove('tmp/rset.ntf.r1')
os.remove('tmp/rset.ntf.r2')
os.remove('tmp/rset.ntf.r0.ovr')
except:
pass
shutil.copyfile( 'data/rset.ntf.r0', 'tmp/rset.ntf.r0' )
shutil.copyfile( 'data/rset.ntf.r1', 'tmp/rset.ntf.r1' )
shutil.copyfile( 'data/rset.ntf.r2', 'tmp/rset.ntf.r2' )
ds = gdal.Open( 'tmp/rset.ntf.r0', gdal.GA_Update )
ds.BuildOverviews( overviewlist = [3] )
ds = None
ds = gdal.Open( 'tmp/rset.ntf.r0' )
if ds.GetRasterBand(1).GetOverviewCount() != 1:
gdaltest.post_reason( 'did not get the expected number of rset overviews.' )
return 'fail'
cs = ds.GetRasterBand(1).GetOverview(0).Checksum()
if cs != 2328:
print(cs)
gdaltest.post_reason('did not get expected checksum for overview of subdataset')
return 'fail'
ds = None
try:
os.remove('tmp/rset.ntf.r0')
os.remove('tmp/rset.ntf.r1')
os.remove('tmp/rset.ntf.r2')
os.remove('tmp/rset.ntf.r0.ovr')
except:
pass
return 'success'
###############################################################################
# Test TEXT and CGM creation options with CreateCopy() (#3376)
def nitf_49():
options = [ "TEXT=DATA_0=COUCOU",
"TEXT=HEADER_0=ABC", # This content is invalid but who cares here
"CGM=SEGMENT_COUNT=1",
"CGM=SEGMENT_0_SLOC_ROW=25",
"CGM=SEGMENT_0_SLOC_COL=25",
"CGM=SEGMENT_0_SDLVL=2",
"CGM=SEGMENT_0_SALVL=1",
"CGM=SEGMENT_0_DATA=XYZ" ]
src_ds = gdal.Open('data/text_md.vrt')
# This will check that the creation option overrides the TEXT metadata domain from the source
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf49.ntf', src_ds,
options = options )
# Test copy from source TEXT and CGM metadata domains
ds2 = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf49_2.ntf', ds )
md = ds2.GetMetadata('TEXT')
if 'DATA_0' not in md or md['DATA_0'] != 'COUCOU' or \
'HEADER_0' not in md or md['HEADER_0'].find('ABC ') == -1:
gdaltest.post_reason('did not get expected TEXT metadata')
print(md)
        return 'fail'
md = ds2.GetMetadata('CGM')
if 'SEGMENT_COUNT' not in md or md['SEGMENT_COUNT'] != '1' or \
'SEGMENT_0_DATA' not in md or md['SEGMENT_0_DATA'] != 'XYZ' :
gdaltest.post_reason('did not get expected CGM metadata')
print(md)
        return 'fail'
src_ds = None
ds = None
ds2 = None
return 'success'
###############################################################################
# Test TEXT and CGM creation options with Create() (#3376)
def nitf_50():
options = [ #"IC=C8",
"TEXT=DATA_0=COUCOU",
"TEXT=HEADER_0=ABC", # This content is invalid but who cares here
"CGM=SEGMENT_COUNT=1",
"CGM=SEGMENT_0_SLOC_ROW=25",
"CGM=SEGMENT_0_SLOC_COL=25",
"CGM=SEGMENT_0_SDLVL=2",
"CGM=SEGMENT_0_SALVL=1",
"CGM=SEGMENT_0_DATA=XYZ" ]
try:
os.remove('tmp/nitf50.ntf')
except:
pass
# This will check that the creation option overrides the TEXT metadata domain from the source
ds = gdal.GetDriverByName('NITF').Create( 'tmp/nitf50.ntf', 100, 100, 3, options = options )
ds.WriteRaster( 0, 0, 100, 100, ' ', 1, 1,
buf_type = gdal.GDT_Byte,
band_list = [1,2,3] )
ds.GetRasterBand( 1 ).SetRasterColorInterpretation( gdal.GCI_BlueBand )
ds.GetRasterBand( 2 ).SetRasterColorInterpretation( gdal.GCI_GreenBand )
ds.GetRasterBand( 3 ).SetRasterColorInterpretation( gdal.GCI_RedBand )
# We need to reopen the dataset, because the TEXT and CGM segments are only written
# when closing the dataset (for JP2 compressed datastreams, we need to wait for the
# imagery to be written)
ds = None
ds = gdal.Open('tmp/nitf50.ntf')
md = ds.GetMetadata('TEXT')
if 'DATA_0' not in md or md['DATA_0'] != 'COUCOU' or \
'HEADER_0' not in md or md['HEADER_0'].find('ABC ') == -1:
gdaltest.post_reason('did not get expected TEXT metadata')
print(md)
        return 'fail'
md = ds.GetMetadata('CGM')
if 'SEGMENT_COUNT' not in md or md['SEGMENT_COUNT'] != '1' or \
'SEGMENT_0_DATA' not in md or md['SEGMENT_0_DATA'] != 'XYZ' :
gdaltest.post_reason('did not get expected CGM metadata')
print(md)
        return 'fail'
ds = None
return 'success'
###############################################################################
# Test reading very small images with NBPP < 8 or NBPP == 12
def nitf_51():
import struct
for xsize in range(1,9):
for nbpp in [1,2,3,4,5,6,7,12]:
ds = gdal.GetDriverByName('NITF').Create( 'tmp/nitf51.ntf', xsize, 1 )
ds = None
f = open('tmp/nitf51.ntf', 'rb+')
# Patch NBPP value at offset 811
f.seek(811)
f.write(struct.pack('B' * 2, 48 + int(nbpp/10), 48 + nbpp % 10))
# Write image data
f.seek(843)
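            # Number of whole bytes needed for xsize pixels at nbpp bits per pixel (rounded up)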
n = int((xsize * nbpp+7) / 8)
for i in range(n):
f.write(struct.pack('B' * 1, 255))
f.close()
ds = gdal.Open('tmp/nitf51.ntf')
if nbpp == 12:
data = ds.GetRasterBand(1).ReadRaster(0, 0, xsize, 1, buf_type = gdal.GDT_UInt16)
arr = struct.unpack('H' * xsize, data)
else:
data = ds.GetRasterBand(1).ReadRaster(0, 0, xsize, 1)
arr = struct.unpack('B' * xsize, data)
ds = None
for i in range(xsize):
if arr[i] != (1 << nbpp) - 1:
gdaltest.post_reason('did not get expected data')
print('xsize = %d, nbpp = %d' % (xsize, nbpp))
print(arr)
return 'fail'
return 'success'
###############################################################################
# Test reading GeoSDE TREs
def nitf_52():
# Create a fake NITF file with GeoSDE TREs (probably not conformant, but enough to test GDAL code)
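    # GEOPSB supplies the datum ('EUR M' -> European 1950, Mean), PRJPSB the projection
    # ('AC' -> Albers Conic Equal Area) and MAPLOB the grid spacing/origin from which
    # the geotransform is derived.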
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf52.ntf', 1, 1, options = \
['FILE_TRE=GEOPSB=01234567890123456789012345678901234567890123456789012345678901234567890123456789012345EURM ', \
'FILE_TRE=PRJPSB=01234567890123456789012345678901234567890123456789012345678901234567890123456789AC0000000000000000000000000000000', \
'TRE=MAPLOB=M 0001000010000000000100000000000005000000'])
ds = None
ds = gdal.Open('tmp/nitf52.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
if wkt != """PROJCS["unnamed",GEOGCS["EUROPEAN 1950, Mean (3 Param)",DATUM["EUROPEAN 1950, Mean (3 Param)",SPHEROID["International 1924 ",6378388,297],TOWGS84[-87,-98,-121,0,0,0,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Albers_Conic_Equal_Area"],PARAMETER["standard_parallel_1",0],PARAMETER["standard_parallel_2",0],PARAMETER["latitude_of_center",0],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0]]""":
gdaltest.post_reason('did not get expected SRS')
print(wkt)
return 'fail'
if gt != (100000.0, 10.0, 0.0, 5000000.0, 0.0, -10.0):
gdaltest.post_reason('did not get expected geotransform')
print(gt)
return 'fail'
return 'success'
###############################################################################
# Test reading UTM MGRS
def nitf_53():
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf53.ntf', 2, 2, options = ['ICORDS=N'])
ds = None
f = open('tmp/nitf53.ntf', 'rb+')
# Patch ICORDS and IGEOLO
f.seek(775)
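    # ICORDS 'U' means IGEOLO holds the four image corners (UL, UR, LR, LL) as
    # 15-character MGRS strings (zone, 100 km square, easting, northing).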
if version_info >= (3,0,0):
exec("f.write(b'U')")
exec("f.write(b'31UBQ1000040000')")
exec("f.write(b'31UBQ2000040000')")
exec("f.write(b'31UBQ2000030000')")
exec("f.write(b'31UBQ1000030000')")
else:
f.write('U')
f.write('31UBQ1000040000')
f.write('31UBQ2000040000')
f.write('31UBQ2000030000')
f.write('31UBQ1000030000')
f.close()
ds = gdal.Open('tmp/nitf53.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
if wkt.find("""PROJCS["UTM Zone 31, Northern Hemisphere",GEOGCS["WGS 84""") != 0:
gdaltest.post_reason('did not get expected SRS')
print(wkt)
return 'fail'
if gt != (205000.0, 10000.0, 0.0, 5445000.0, 0.0, -10000.0):
gdaltest.post_reason('did not get expected geotransform')
print(gt)
return 'fail'
return 'success'
###############################################################################
# Test reading RPC00B
def nitf_54():
# Create a fake NITF file with RPC00B TRE (probably not conformant, but enough to test GDAL code)
RPC00B='100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf54.ntf', 1, 1, options = ['TRE=RPC00B=' + RPC00B])
ds = None
ds = gdal.Open('tmp/nitf54.ntf')
md = ds.GetMetadata('RPC')
ds = None
if md is None or 'HEIGHT_OFF' not in md:
print(md)
return 'fail'
return 'success'
###############################################################################
# Test reading ICHIPB
def nitf_55():
# Create a fake NITF file with ICHIPB TRE (probably not conformant, but enough to test GDAL code)
ICHIPB='00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf55.ntf', 1, 1, options = ['TRE=ICHIPB=' + ICHIPB])
ds = None
ds = gdal.Open('tmp/nitf55.ntf')
md = ds.GetMetadata()
ds = None
if md is None or 'ICHIP_SCALE_FACTOR' not in md:
print(md)
return 'fail'
return 'success'
###############################################################################
# Test reading USE00A
def nitf_56():
# Create a fake NITF file with USE00A TRE (probably not conformant, but enough to test GDAL code)
USE00A='00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf56.ntf', 1, 1, options = ['TRE=USE00A=' + USE00A])
ds = None
ds = gdal.Open('tmp/nitf56.ntf')
md = ds.GetMetadata()
ds = None
if md is None or 'NITF_USE00A_ANGLE_TO_NORTH' not in md:
print(md)
return 'fail'
return 'success'
###############################################################################
# Test reading GEOLOB
def nitf_57():
# Create a fake NITF file with GEOLOB TRE
GEOLOB='000000360000000360-180.000000000090.000000000000'
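    # GEOLOB gives the pixel density per 360 degrees and the lon/lat of the upper-left
    # corner; the values above amount to 1 degree pixels anchored at (-180, 90), which is
    # what the geotransform check below expects.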
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf57.ntf', 1, 1, options = ['TRE=GEOLOB=' + GEOLOB])
ds = None
ds = gdal.Open('tmp/nitf57.ntf')
gt = ds.GetGeoTransform()
ds = None
if gt != (-180.0, 1.0, 0.0, 90.0, 0.0, -1.0):
gdaltest.post_reason('did not get expected geotransform')
print(gt)
        return 'fail'
return 'success'
###############################################################################
# Test reading STDIDC
def nitf_58():
# Create a fake NITF file with STDIDC TRE (probably not conformant, but enough to test GDAL code)
STDIDC='00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf58.ntf', 1, 1, options = ['TRE=STDIDC=' + STDIDC])
ds = None
ds = gdal.Open('tmp/nitf58.ntf')
md = ds.GetMetadata()
ds = None
if md is None or 'NITF_STDIDC_ACQUISITION_DATE' not in md:
print(md)
return 'fail'
return 'success'
###############################################################################
# Test georeferencing through .nfw and .hdr files
def nitf_59():
shutil.copyfile('data/nitf59.nfw', 'tmp/nitf59.nfw')
shutil.copyfile('data/nitf59.hdr', 'tmp/nitf59.hdr')
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf59.ntf', 1, 1, options = ['ICORDS=N'])
ds = None
ds = gdal.Open('tmp/nitf59.ntf')
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
if wkt.find("""PROJCS["UTM Zone 31, Northern Hemisphere",GEOGCS["WGS 84""") != 0 :
gdaltest.post_reason('did not get expected SRS')
print(wkt)
return 'fail'
if gt != (149999.5, 1.0, 0.0, 4500000.5, 0.0, -1.0):
gdaltest.post_reason('did not get expected geotransform')
print(gt)
return 'fail'
return 'success'
###############################################################################
# Test reading CADRG polar tile georeferencing (#2940)
def nitf_60():
    # Silence errors because the file is truncated
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.Open('data/testtest.on9')
gdal.PopErrorHandler()
wkt = ds.GetProjectionRef()
gt = ds.GetGeoTransform()
ds = None
if wkt != """PROJCS["unnamed",GEOGCS["unnamed ellipse",DATUM["unknown",SPHEROID["unnamed",6378137,0]],PRIMEM["Greenwich",0],UNIT["degree",0.0174532925199433]],PROJECTION["Azimuthal_Equidistant"],PARAMETER["latitude_of_center",90],PARAMETER["longitude_of_center",0],PARAMETER["false_easting",0],PARAMETER["false_northing",0],UNIT["Meter",1]]""":
gdaltest.post_reason('did not get expected SRS')
print(wkt)
return 'fail'
ref_gt = [1036422.8453166834, 149.94543479697344, 0.0, 345474.28177222813, 0.0, -149.94543479697404]
for i in range(6):
if abs(gt[i]-ref_gt[i]) > 1e-6:
gdaltest.post_reason('did not get expected geotransform')
print(gt)
return 'fail'
return 'success'
###############################################################################
# Test reading TRE from DE segment
def nitf_61():
# Derived from http://www.gwg.nga.mil/ntb/baseline/software/testfile/rsm/SampleFiles/FrameSet1/NITF_Files/i_6130a.zip
# but hand edited to have just 1x1 imagery
ds = gdal.Open('data/i_6130a_truncated.ntf')
md = ds.GetMetadata('TRE')
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
if md is None or 'RSMDCA' not in md or 'RSMECA' not in md or 'RSMPCA' not in md or 'RSMIDA' not in md:
print(md)
return 'fail'
if xml_tre.find('<tre name="RSMDCA"') == -1:
gdaltest.post_reason('did not get expected xml:TRE')
        print(xml_tre)
return 'fail'
return 'success'
###############################################################################
# Test creating & reading image comments
def nitf_62():
# 80+1 characters
comments = '012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678ZA'
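    # NITF stores image comments in 80-character blocks, so 81 characters should come back
    # as two blocks (160 characters in total)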
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf62.ntf', 1, 1, options = ['ICOM=' + comments])
ds = None
ds = gdal.Open('tmp/nitf62.ntf')
md = ds.GetMetadata()
ds = None
got_comments = md['NITF_IMAGE_COMMENTS']
if len(got_comments) != 160 or got_comments.find(comments) == -1:
gdaltest.post_reason('did not get expected comments')
print("'%s'" % got_comments)
return 'fail'
return 'success'
###############################################################################
# Test NITFReadImageLine() and NITFWriteImageLine() when nCols < nBlockWidth (#3551)
def nitf_63():
ds = gdal.GetDriverByName('NITF').Create('tmp/nitf63.ntf', 50, 25, 3, gdal.GDT_Int16, options = ['BLOCKXSIZE=256'])
ds = None
try:
os.SEEK_SET
except AttributeError:
os.SEEK_SET, os.SEEK_CUR, os.SEEK_END = list(range(3))
    # Patch IMODE to 'P' (band interleaved by pixel) by hand
    f = open('tmp/nitf63.ntf', 'rb+')
    f.seek(820, os.SEEK_SET)
    f.write('P'.encode('ascii'))
f.close()
ds = gdal.Open('tmp/nitf63.ntf', gdal.GA_Update)
md = ds.GetMetadata()
if md['NITF_IMODE'] != 'P':
gdaltest.post_reason('wrong IMODE')
return 'fail'
ds.GetRasterBand(1).Fill(0)
ds.GetRasterBand(2).Fill(127)
ds.GetRasterBand(3).Fill(255)
ds = None
ds = gdal.Open('tmp/nitf63.ntf')
cs1 = ds.GetRasterBand(1).Checksum()
cs2 = ds.GetRasterBand(2).Checksum()
cs3 = ds.GetRasterBand(3).Checksum()
ds = None
if cs1 != 0 or cs2 != 14186 or cs3 != 15301:
gdaltest.post_reason('did not get expected checksums : (%d, %d, %d) instead of (0, 14186, 15301)' % (cs1, cs2, cs3))
return 'fail'
return 'success'
###############################################################################
# Test SDE_TRE creation option
def nitf_64():
src_ds = gdal.GetDriverByName('GTiff').Create('/vsimem/nitf_64.tif', 256, 256, 1)
src_ds.SetGeoTransform([2.123456789, 0.123456789, 0, 49.123456789, 0, -0.123456789])
sr = osr.SpatialReference()
sr.SetWellKnownGeogCS('WGS84')
src_ds.SetProjection(sr.ExportToWkt())
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options = ['ICORDS=D'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
    # Note that the top-left location is only precise to the 3rd decimal!
expected_gt = (2.123270588235294, 0.12345882352941177, 0.0, 49.123729411764707, 0.0, -0.12345882352941176)
got_gt = ds.GetGeoTransform()
for i in range(6):
if abs(expected_gt[i] - got_gt[i]) > 1e-10:
gdaltest.post_reason('did not get expected GT in ICORDS=D mode')
print(got_gt)
return 'fail'
ds = None
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options = ['ICORDS=G'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
    # Note that the top-left location is only precise to the 3rd decimal!
expected_gt = (2.1235495642701521, 0.12345642701525053, 0.0, 49.123394880174288, 0.0, -0.12345642701525052)
got_gt = ds.GetGeoTransform()
for i in range(6):
if abs(expected_gt[i] - got_gt[i]) > 1e-10:
gdaltest.post_reason('did not get expected GT in ICORDS=G mode')
print(got_gt)
return 'fail'
ds = None
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_64.ntf', src_ds, options = ['SDE_TRE=YES'])
ds = None
ds = gdal.Open('/vsimem/nitf_64.ntf')
    # Note that the top-left location is precise up to the 9th decimal
expected_gt = (2.123456789, 0.1234567901234568, 0.0, 49.123456789000002, 0.0, -0.12345679012345678)
got_gt = ds.GetGeoTransform()
for i in range(6):
if abs(expected_gt[i] - got_gt[i]) > 1e-10:
gdaltest.post_reason('did not get expected GT in SDE_TRE mode')
print(got_gt)
return 'fail'
ds = None
src_ds = None
gdal.Unlink('/vsimem/nitf_64.tif')
gdal.Unlink('/vsimem/nitf_64.ntf')
return 'success'
###############################################################################
# Test creating an image with block_width = image_width > 8192 (#3922)
def nitf_65():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_65.ntf', 10000, 100, options = ['BLOCKXSIZE=10000'])
ds = None
ds = gdal.Open('/vsimem/nitf_65.ntf')
(block_xsize, block_ysize) = ds.GetRasterBand(1).GetBlockSize()
ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_65.ntf')
if block_xsize != 10000:
print(block_xsize)
return 'fail'
return 'success'
###############################################################################
# Test creating an image with block_height = image_height > 8192 (#3922)
def nitf_66():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_66.ntf', 100, 10000, options = ['BLOCKYSIZE=10000', 'BLOCKXSIZE=50'])
ds = None
ds = gdal.Open('/vsimem/nitf_66.ntf')
(block_xsize, block_ysize) = ds.GetRasterBand(1).GetBlockSize()
ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_66.ntf')
if block_ysize != 10000:
print(block_ysize)
return 'fail'
return 'success'
###############################################################################
# Test that we don't use scanline access in illegal cases (#3926)
def nitf_67():
src_ds = gdal.Open('data/byte.tif')
gdal.PushErrorHandler('CPLQuietErrorHandler')
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_67.ntf', src_ds, options = ['BLOCKYSIZE=1', 'BLOCKXSIZE=10'], strict=0)
gdal.PopErrorHandler()
ds = None
src_ds = None
ds = gdal.Open('/vsimem/nitf_67.ntf')
cs = ds.GetRasterBand(1).Checksum()
ds = None
gdal.Unlink('/vsimem/nitf_67.ntf')
if cs != 4672:
print(cs)
return 'fail'
return 'success'
###############################################################################
# Test reading NITF_METADATA domain
def nitf_68():
ds = gdal.Open('data/rgb.ntf')
if len(ds.GetMetadata('NITF_METADATA')) != 2:
print(ds.GetMetadata('NITF_METADATA'))
return 'fail'
ds = None
ds = gdal.Open('data/rgb.ntf')
if len(ds.GetMetadataItem('NITFFileHeader','NITF_METADATA')) == 0:
print(ds.GetMetadataItem('NITFFileHeader','NITF_METADATA'))
return 'fail'
ds = None
return 'success'
###############################################################################
# Test SetGCPs() support
def nitf_69():
vrt_txt = """<VRTDataset rasterXSize="20" rasterYSize="20">
<GCPList Projection='GEOGCS["WGS 84",DATUM["WGS_1984",SPHEROID["WGS 84",6378137,298.257223563,AUTHORITY["EPSG","7030"]],AUTHORITY["EPSG","6326"]],PRIMEM["Greenwich",0,AUTHORITY["EPSG","8901"]],UNIT["degree",0.0174532925199433,AUTHORITY["EPSG","9122"]],AUTHORITY["EPSG","4326"]]'>
<GCP Id="" Pixel="0.5" Line="0.5" X="2" Y="49"/>
<GCP Id="" Pixel="0.5" Line="19.5" X="2" Y="48"/>
<GCP Id="" Pixel="19.5" Line="0.5" X="3" Y="49.5"/>
<GCP Id="" Pixel="19.5" Line="19.5" X="3" Y="48"/>
</GCPList>
<VRTRasterBand dataType="Byte" band="1">
<SimpleSource>
<SourceFilename relativeToVRT="1">data/byte.tif</SourceFilename>
<SourceProperties RasterXSize="20" RasterYSize="20" DataType="Byte" BlockXSize="20" BlockYSize="20" />
<SourceBand>1</SourceBand>
</SimpleSource>
</VRTRasterBand>
</VRTDataset>"""
# Test CreateCopy()
vrt_ds = gdal.Open(vrt_txt)
ds = gdal.GetDriverByName('NITF').CreateCopy('/vsimem/nitf_69_src.ntf', vrt_ds)
ds = None
vrt_ds = None
# Just in case
gdal.Unlink('/vsimem/nitf_69_src.ntf.aux.xml')
# Test Create() and SetGCPs()
src_ds = gdal.Open('/vsimem/nitf_69_src.ntf')
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_69_dest.ntf', 20, 20, 1, options = ['ICORDS=G'])
ds.SetGCPs(src_ds.GetGCPs(), src_ds.GetGCPProjection())
ds.SetGCPs(src_ds.GetGCPs(), src_ds.GetGCPProjection()) # To check we can call it several times without error
ds = None
src_ds = None
# Now open again
ds = gdal.Open('/vsimem/nitf_69_dest.ntf')
got_gcps = ds.GetGCPs()
ds = None
gdal.Unlink('/vsimem/nitf_69_src.ntf')
gdal.Unlink('/vsimem/nitf_69_dest.ntf')
# Check
# Upper-left
if abs(got_gcps[0].GCPPixel - 0.5) > 1e-5 or abs(got_gcps[0].GCPLine - 0.5) > 1e-5 or \
abs(got_gcps[0].GCPX - 2) > 1e-5 or abs(got_gcps[0].GCPY - 49) > 1e-5:
gdaltest.post_reason('wrong gcp')
print(got_gcps[0])
return 'fail'
# Upper-right
if abs(got_gcps[1].GCPPixel - 19.5) > 1e-5 or abs(got_gcps[1].GCPLine - 0.5) > 1e-5 or \
abs(got_gcps[1].GCPX - 3) > 1e-5 or abs(got_gcps[1].GCPY - 49.5) > 1e-5:
gdaltest.post_reason('wrong gcp')
print(got_gcps[1])
return 'fail'
# Lower-right
if abs(got_gcps[2].GCPPixel - 19.5) > 1e-5 or abs(got_gcps[2].GCPLine - 19.5) > 1e-5 or \
abs(got_gcps[2].GCPX - 3) > 1e-5 or abs(got_gcps[2].GCPY - 48) > 1e-5:
gdaltest.post_reason('wrong gcp')
print(got_gcps[2])
return 'fail'
# Lower-left
if abs(got_gcps[3].GCPPixel - 0.5) > 1e-5 or abs(got_gcps[3].GCPLine - 19.5) > 1e-5 or \
abs(got_gcps[3].GCPX - 2) > 1e-5 or abs(got_gcps[3].GCPY - 48) > 1e-5:
gdaltest.post_reason('wrong gcp')
print(got_gcps[3])
return 'fail'
return 'success'
###############################################################################
# Create and read a JPEG encoded NITF file with NITF dimensions != JPEG dimensions
def nitf_70():
src_ds = gdal.Open( 'data/rgbsmall.tif' )
ds = gdal.GetDriverByName('NITF').CreateCopy( 'tmp/nitf_70.ntf', src_ds,
options = ['IC=C3', 'BLOCKXSIZE=64', 'BLOCKYSIZE=64'] )
ds = None
# For comparison
ds = gdal.GetDriverByName('GTiff').CreateCopy( 'tmp/nitf_70.tif', src_ds,
options = ['COMPRESS=JPEG', 'PHOTOMETRIC=YCBCR', 'TILED=YES', 'BLOCKXSIZE=64', 'BLOCKYSIZE=64'] )
ds = None
src_ds = None
ds = gdal.Open( 'tmp/nitf_70.ntf' )
cs = ds.GetRasterBand(1).Checksum()
ds = None
ds = gdal.Open( 'tmp/nitf_70.tif' )
cs_ref = ds.GetRasterBand(1).Checksum()
ds = None
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf_70.ntf' )
gdal.GetDriverByName('GTiff').Delete( 'tmp/nitf_70.tif' )
if cs != cs_ref:
print(cs)
print(cs_ref)
return 'fail'
return 'success'
###############################################################################
# Test reading ENGRDA TRE (#6285)
def nitf_71():
ds = gdal.GetDriverByName('NITF').Create('/vsimem/nitf_71.ntf', 1, 1, options = \
['TRE=ENGRDA=0123456789012345678900210012345678901230123X01200000002XY01X01230123X01200000001X'])
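    # The raw ENGRDA TRE above encodes two engineering data records; the driver should
    # expose them parsed field by field in the xml:TRE metadata domain checked below.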
ds = None
ds = gdal.Open('/vsimem/nitf_71.ntf')
data = ds.GetMetadata('xml:TRE')[0]
ds = None
gdal.GetDriverByName('NITF').Delete( '/vsimem/nitf_71.ntf' )
expected_data = """<tres>
<tre name="ENGRDA" location="image">
<field name="RESRC" value="01234567890123456789" />
<field name="RECNT" value="002" />
<repeated name="RECORDS" number="2">
<group index="0">
<field name="ENGLN" value="10" />
<field name="ENGLBL" value="0123456789" />
<field name="ENGMTXC" value="0123" />
<field name="ENGMTXR" value="0123" />
<field name="ENGTYP" value="X" />
<field name="ENGDTS" value="0" />
<field name="ENGDTU" value="12" />
<field name="ENGDATC" value="00000002" />
<field name="ENGDATA" value="XY" />
</group>
<group index="1">
<field name="ENGLN" value="01" />
<field name="ENGLBL" value="X" />
<field name="ENGMTXC" value="0123" />
<field name="ENGMTXR" value="0123" />
<field name="ENGTYP" value="X" />
<field name="ENGDTS" value="0" />
<field name="ENGDTU" value="12" />
<field name="ENGDATC" value="00000001" />
<field name="ENGDATA" value="X" />
</group>
</repeated>
</tre>
</tres>
"""
if data != expected_data:
print(data)
return 'fail'
return 'success'
###############################################################################
# Test NITF21_CGM_ANNO_Uncompressed_unmasked.ntf for bug #1313 and #1714
def nitf_online_1():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/NITF21_CGM_ANNO_Uncompressed_unmasked.ntf', 'NITF21_CGM_ANNO_Uncompressed_unmasked.ntf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/NITF21_CGM_ANNO_Uncompressed_unmasked.ntf', 1, 13123, filename_absolute = 1 )
# Shut up the warning about missing image segment
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ret = tst.testOpen()
gdal.PopErrorHandler()
return ret
###############################################################################
# Test NITF file with multiple images
def nitf_online_2():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf1.1/U_0001a.ntf', 'U_0001a.ntf'):
return 'skip'
ds = gdal.Open( 'tmp/cache/U_0001a.ntf' )
md = ds.GetMetadata('SUBDATASETS')
if 'SUBDATASET_1_NAME' not in md:
gdaltest.post_reason( 'missing SUBDATASET_1_NAME metadata' )
return 'fail'
ds = None
return 'success'
###############################################################################
# Test ARIDPCM (C2) image
def nitf_online_3():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf1.1/U_0001a.ntf', 'U_0001a.ntf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'NITF_IM:3:tmp/cache/U_0001a.ntf', 1, 23463, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test Vector Quantization (VQ) (C4) file
def nitf_online_4():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/cadrg/001zc013.on1', '001zc013.on1'):
return 'skip'
# check that the RPF attribute metadata was carried through.
ds = gdal.Open( 'tmp/cache/001zc013.on1' )
md = ds.GetMetadata()
if md['NITF_RPF_CurrencyDate'] != '19950720' \
or md['NITF_RPF_ProductionDate'] != '19950720' \
or md['NITF_RPF_SignificantDate'] != '19890629':
gdaltest.post_reason( 'RPF attribute metadata not captured (#3413)')
return 'fail'
ds = None
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/001zc013.on1', 1, 53960, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test Vector Quantization (VQ) (M4) file
def nitf_online_5():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/cadrg/overview.ovr', 'overview.ovr'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/overview.ovr', 1, 60699, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test a JPEG compressed, single blocked 2048x2048 mono image
def nitf_online_6():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_4001b.ntf', 'U_4001b.ntf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/U_4001b.ntf', 1, 60030, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test all combinations of IMODE (S,P,B,R) for an image with 6 bands whose 3 are RGB
def nitf_online_7():
files = [ 'ns3228b.nsf', 'i_3228c.ntf', 'ns3228d.nsf', 'i_3228e.ntf' ]
for file in files:
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/' + file, file):
return 'skip'
ds = gdal.Open('tmp/cache/' + file)
if ds.RasterCount != 6:
return 'fail'
checksums = [ 48385, 48385, 40551, 54223, 48385, 33094 ]
colorInterpretations = [ gdal.GCI_Undefined, gdal.GCI_Undefined, gdal.GCI_RedBand, gdal.GCI_BlueBand, gdal.GCI_Undefined, gdal.GCI_GreenBand ]
for i in range(6):
cs = ds.GetRasterBand(i+1).Checksum()
if cs != checksums[i]:
gdaltest.post_reason( 'got checksum %d for image %s' \
% (cs, file) )
return 'fail'
if ds.GetRasterBand(i+1).GetRasterColorInterpretation() != colorInterpretations[i]:
gdaltest.post_reason( 'got wrong color interp for image %s' \
% file )
return 'fail'
ds = None
#shutil.copyfile('tmp/cache/' + file, 'tmp/' + file)
#ds = gdal.Open('tmp/' + file, gdal.GA_Update)
#data = ds.GetRasterBand(1).ReadRaster(0, 0, 1024, 1024)
#ds.GetRasterBand(1).Fill(0)
#ds = None
#ds = gdal.Open('tmp/' + file, gdal.GA_Update)
#ds.GetRasterBand(1).WriteRaster(0, 0, 1024, 1024, data)
#ds = None
#ds = gdal.Open('tmp/' + file)
#print(ds.GetRasterBand(1).Checksum())
#ds = None
#os.remove('tmp/' + file)
return 'success'
###############################################################################
# Test JPEG-compressed multi-block mono-band image with a data mask subheader (IC=M3, IMODE=B)
def nitf_online_8():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3301j.nsf', 'ns3301j.nsf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/ns3301j.nsf', 1, 56861, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test JPEG-compressed multi-block mono-band image without a data mask subheader (IC=C3, IMODE=B)
def nitf_online_9():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3304a.nsf', 'ns3304a.nsf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/ns3304a.nsf', 1, 32419, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Verify CGM access on a file with 8 CGM segments
def nitf_online_10():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3119b.nsf', 'ns3119b.nsf'):
return 'skip'
# Shut up the warning about missing image segment
gdal.PushErrorHandler( 'CPLQuietErrorHandler' )
ds = gdal.Open( 'tmp/cache/ns3119b.nsf' )
gdal.PopErrorHandler()
mdCGM = ds.GetMetadata( 'CGM' )
ds = None
if mdCGM['SEGMENT_COUNT'] != '8':
gdaltest.post_reason( 'wrong SEGMENT_COUNT.' )
return 'fail'
tab = [
('SEGMENT_0_SLOC_ROW', '0'),
('SEGMENT_0_SLOC_COL', '0'),
        ('SEGMENT_0_CCS_ROW', '0'),
('SEGMENT_0_CCS_COL', '0'),
('SEGMENT_0_SDLVL', '1'),
('SEGMENT_0_SALVL', '0'),
('SEGMENT_1_SLOC_ROW', '0'),
('SEGMENT_1_SLOC_COL', '684'),
('SEGMENT_2_SLOC_ROW', '0'),
('SEGMENT_2_SLOC_COL', '1364'),
('SEGMENT_3_SLOC_ROW', '270'),
('SEGMENT_3_SLOC_COL', '0'),
('SEGMENT_4_SLOC_ROW', '270'),
('SEGMENT_4_SLOC_COL', '684'),
('SEGMENT_5_SLOC_ROW', '270'),
('SEGMENT_5_SLOC_COL', '1364'),
('SEGMENT_6_SLOC_ROW', '540'),
('SEGMENT_6_SLOC_COL', '0'),
('SEGMENT_7_SLOC_ROW', '540'),
('SEGMENT_7_SLOC_COL', '1364'),
('SEGMENT_7_CCS_ROW', '540'),
('SEGMENT_7_CCS_COL', '1364'),
('SEGMENT_7_SDLVL', '8'),
('SEGMENT_7_SALVL', '0'),
]
for item in tab:
if mdCGM[item[0]] != item[1]:
gdaltest.post_reason( 'wrong value for %s.' % item[0] )
return 'fail'
return 'success'
###############################################################################
# 5 text files
def nitf_online_11():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_1122a.ntf', 'U_1122a.ntf'):
return 'skip'
ds = gdal.Open( 'tmp/cache/U_1122a.ntf' )
mdTEXT = ds.GetMetadata( 'TEXT' )
ds = None
if mdTEXT['DATA_0'] != 'This is test text file 01.\r\n':
gdaltest.post_reason( 'did not find expected DATA_0 from metadata.' )
return 'fail'
if mdTEXT['DATA_1'] != 'This is test text file 02.\r\n':
gdaltest.post_reason( 'did not find expected DATA_1 from metadata.' )
return 'fail'
if mdTEXT['DATA_2'] != 'This is test text file 03.\r\n':
gdaltest.post_reason( 'did not find expected DATA_2 from metadata.' )
return 'fail'
if mdTEXT['DATA_3'] != 'This is test text file 04.\r\n':
gdaltest.post_reason( 'did not find expected DATA_3 from metadata.' )
return 'fail'
if mdTEXT['DATA_4'] != 'This is test text file 05.\r\n':
gdaltest.post_reason( 'did not find expected DATA_4 from metadata.' )
return 'fail'
return 'success'
###############################################################################
# Test 12 bit uncompressed image.
def nitf_online_12():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/i_3430a.ntf', 'i_3430a.ntf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/i_3430a.ntf', 1, 38647,
filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test complex relative graphic/image attachment.
def nitf_online_13():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/u_3054a.ntf', 'u_3054a.ntf'):
return 'skip'
# Shut up the warning about missing image segment
ds = gdal.Open( 'NITF_IM:2:tmp/cache/u_3054a.ntf' )
mdCGM = ds.GetMetadata( 'CGM' )
md = ds.GetMetadata()
ds = None
if mdCGM['SEGMENT_COUNT'] != '3':
gdaltest.post_reason( 'wrong SEGMENT_COUNT.' )
return 'fail'
tab = [
('SEGMENT_2_SLOC_ROW', '0'),
('SEGMENT_2_SLOC_COL', '0'),
        ('SEGMENT_2_CCS_ROW', '1100'),
('SEGMENT_2_CCS_COL', '1100'),
('SEGMENT_2_SDLVL', '6'),
('SEGMENT_2_SALVL', '3')
]
for item in tab:
if mdCGM[item[0]] != item[1]:
gdaltest.post_reason( 'wrong value for %s.' % item[0] )
return 'fail'
tab = [
('NITF_IDLVL','3'),
('NITF_IALVL','1'),
('NITF_ILOC_ROW','1100'),
('NITF_ILOC_COLUMN','1100'),
('NITF_CCS_ROW','1100'),
('NITF_CCS_COLUMN','1100'),
]
for item in tab:
if md[item[0]] != item[1]:
gdaltest.post_reason( 'wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]) )
return 'fail'
return 'success'
###############################################################################
# Check reading a 12-bit JPEG compressed NITF (multi-block)
def nitf_online_14():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_4020h.ntf', 'U_4020h.ntf'):
return 'skip'
try:
os.remove('tmp/cache/U_4020h.ntf.aux.xml')
except:
pass
# Check if JPEG driver supports 12bit JPEG reading/writing
jpg_drv = gdal.GetDriverByName('JPEG')
md = jpg_drv.GetMetadata()
if md[gdal.DMD_CREATIONDATATYPES].find('UInt16') == -1:
sys.stdout.write('(12bit jpeg not available) ... ')
return 'skip'
ds = gdal.Open('tmp/cache/U_4020h.ntf')
if ds.GetRasterBand(1).DataType != gdal.GDT_UInt16:
return 'fail'
stats = ds.GetRasterBand(1).GetStatistics( 0, 1 )
if stats[2] < 2607 or stats[2] > 2608:
print(stats)
return 'fail'
ds = None
try:
os.remove('tmp/cache/U_4020h.ntf.aux.xml')
except:
pass
return 'success'
###############################################################################
# Test opening an IC=C8 NITF file with the various JPEG2000 drivers
def nitf_online_15(driver_to_test, expected_cs = 1054):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/p0_01/p0_01a.ntf', 'p0_01a.ntf'):
return 'skip'
try:
jp2_drv = gdal.GetDriverByName( driver_to_test )
except:
jp2_drv = None
if jp2_drv is None:
return 'skip'
    # Deregister other potentially conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/p0_01a.ntf')
if ds.GetRasterBand(1).Checksum() == expected_cs:
ret = 'success'
else:
print(ds.GetRasterBand(1).Checksum())
        gdaltest.post_reason( 'Did not get expected checksum' )
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_online_15_jp2ecw():
return nitf_online_15('JP2ECW')
def nitf_online_15_jp2mrsid():
return nitf_online_15('JP2MrSID')
def nitf_online_15_jp2kak():
return nitf_online_15('JP2KAK')
def nitf_online_15_jasper():
return nitf_online_15('JPEG2000')
def nitf_online_15_openjpeg():
return nitf_online_15('JP2OpenJPEG')
###############################################################################
# Test opening an IC=C8 NITF file which has a 256-entry palette/LUT in both the JP2 header and the image subheader
# We expect RGB expansion from some JPEG2000 drivers
def nitf_online_16(driver_to_test):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/jp2_09/file9_jp2_2places.ntf', 'file9_jp2_2places.ntf'):
return 'skip'
try:
jp2_drv = gdal.GetDriverByName( driver_to_test )
except:
jp2_drv = None
if jp2_drv is None:
return 'skip'
    # Deregister other potentially conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/file9_jp2_2places.ntf')
# JPEG2000 driver
if ds.RasterCount == 3 and \
ds.GetRasterBand(1).Checksum() == 48954 and \
ds.GetRasterBand(2).Checksum() == 4939 and \
ds.GetRasterBand(3).Checksum() == 17734 :
ret = 'success'
elif ds.RasterCount == 1 and \
ds.GetRasterBand(1).Checksum() == 47664 and \
ds.GetRasterBand(1).GetRasterColorTable() != None:
ret = 'success'
else:
print(ds.RasterCount)
for i in range(ds.RasterCount):
print(ds.GetRasterBand(i+1).Checksum())
print(ds.GetRasterBand(1).GetRasterColorTable())
        gdaltest.post_reason( 'Did not get expected checksums' )
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_online_16_jp2ecw():
return nitf_online_16('JP2ECW')
def nitf_online_16_jp2mrsid():
return nitf_online_16('JP2MrSID')
def nitf_online_16_jp2kak():
return nitf_online_16('JP2KAK')
def nitf_online_16_jasper():
return nitf_online_16('JPEG2000')
def nitf_online_16_openjpeg():
return nitf_online_16('JP2OpenJPEG')
###############################################################################
# Test opening an IC=C8 NITF file which has a 256-entry palette/LUT in the image subheader, with the JP2 header completely removed
# We don't expect RGB expansion from the JPEG2000 driver
def nitf_online_17(driver_to_test):
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Jpeg2000/jp2_09/file9_j2c.ntf', 'file9_j2c.ntf'):
return 'skip'
try:
jp2_drv = gdal.GetDriverByName( driver_to_test )
except:
jp2_drv = None
if jp2_drv is None:
return 'skip'
    # Deregister other potentially conflicting JPEG2000 drivers
gdaltest.deregister_all_jpeg2000_drivers_but(driver_to_test)
ds = gdal.Open('tmp/cache/file9_j2c.ntf')
if ds.RasterCount == 1 and \
ds.GetRasterBand(1).Checksum() == 47664 and \
ds.GetRasterBand(1).GetRasterColorTable() != None:
ret = 'success'
else:
print(ds.RasterCount)
for i in range(ds.RasterCount):
print(ds.GetRasterBand(i+1).Checksum())
print(ds.GetRasterBand(1).GetRasterColorTable())
        gdaltest.post_reason( 'Did not get expected checksums' )
ret = 'fail'
gdaltest.reregister_all_jpeg2000_drivers()
return ret
def nitf_online_17_jp2ecw():
return nitf_online_17('JP2ECW')
def nitf_online_17_jp2mrsid():
return nitf_online_17('JP2MrSID')
def nitf_online_17_jp2kak():
return nitf_online_17('JP2KAK')
def nitf_online_17_jasper():
return nitf_online_17('JPEG2000')
def nitf_online_17_openjpeg():
return nitf_online_17('JP2OpenJPEG')
###############################################################################
# Test polar stereographic CADRG tile.
def nitf_online_18():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/bugs/bug3337.ntf', 'bug3337.ntf'):
return 'skip'
ds = gdal.Open('tmp/cache/bug3337.ntf')
gt = ds.GetGeoTransform()
prj = ds.GetProjection()
    # If we have a functioning coordinate transformer.
if prj[:6] == 'PROJCS':
if prj.find('Azimuthal_Equidistant') == -1:
gdaltest.post_reason( 'wrong projection?' )
return 'fail'
expected_gt=(-1669792.3618991028, 724.73626818537502, 0.0, -556597.45396636717, 0.0, -724.73626818537434)
if not gdaltest.geotransform_equals( gt, expected_gt, 1.0 ):
gdaltest.post_reason( 'did not get expected geotransform.' )
return 'fail'
# If we do not have a functioning coordinate transformer.
else:
if prj != '' \
or not gdaltest.geotransform_equals(gt,(0,1,0,0,0,1),0.00000001):
print(gt)
print(prj)
gdaltest.post_reason( 'did not get expected empty gt/projection' )
return 'fail'
prj = ds.GetGCPProjection()
if prj[:6] != 'GEOGCS':
gdaltest.post_reason( 'did not get expected geographic srs' )
return 'fail'
gcps = ds.GetGCPs()
gcp3 = gcps[3]
if gcp3.GCPPixel != 0 or gcp3.GCPLine != 1536 \
or abs(gcp3.GCPX+45) > 0.0000000001 \
or abs(gcp3.GCPY-68.78679656) > 0.00000001:
gdaltest.post_reason( 'did not get expected gcp.')
return 'fail'
ds = None
return 'success'
###############################################################################
# Test CADRG tile crossing dateline (#3383)
def nitf_online_19():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/0000M033.GN3', '0000M033.GN3'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/0000M033.GN3', 1, 38928,
filename_absolute = 1 )
return tst.testOpen( check_gt = (174.375000000000000,0.010986328125000,0,
51.923076923076927,0,-0.006760817307692) )
###############################################################################
# Check that the RPF attribute metadata was carried through.
# Special case where the reported size of the attribute subsection is
# smaller than what is actually available
def nitf_online_20():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/0000M033.GN3', '0000M033.GN3'):
return 'skip'
# check that the RPF attribute metadata was carried through.
# Special case where the reported size of the attribute subsection is
    # smaller than what is actually available
ds = gdal.Open( 'tmp/cache/0000M033.GN3' )
md = ds.GetMetadata()
if md['NITF_RPF_CurrencyDate'] != '19941201' \
or md['NITF_RPF_ProductionDate'] != '19980511' \
or md['NITF_RPF_SignificantDate'] != '19850305':
gdaltest.post_reason( 'RPF attribute metadata not captured (#3413)')
return 'fail'
return 'success'
###############################################################################
# Check that we can read the NITF header located in the STREAMING_FILE_HEADER DE
# segment when the header at the beginning of the file is incomplete
def nitf_online_21():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv2_1/ns3321a.nsf', 'ns3321a.nsf'):
return 'skip'
ds = gdal.Open( 'tmp/cache/ns3321a.nsf' )
md = ds.GetMetadata()
ds = None
# If we get NS3321A, it means we are not exploiting the header from the STREAMING_FILE_HEADER DE segment
if md['NITF_OSTAID'] != 'I_3321A':
gdaltest.post_reason('did not get expected OSTAID value')
print(md['NITF_OSTAID'])
return 'fail'
return 'success'
###############################################################################
# Test fix for #3002 (reconcile NITF file with LA segments)
#
def nitf_online_22():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/software/testfile/Nitfv1_1/U_0001C.NTF', 'U_0001C.NTF'):
return 'skip'
ds = gdal.Open( 'NITF_IM:1:tmp/cache/U_0001C.NTF' )
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL','6'),
('NITF_IALVL','1'),
('NITF_ILOC_ROW','360'),
('NITF_ILOC_COLUMN','380'),
('NITF_CCS_ROW','425'),
('NITF_CCS_COLUMN','410'),
]
for item in tab:
if md[item[0]] != item[1]:
gdaltest.post_reason( '(1) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]) )
return 'fail'
ds = gdal.Open( 'NITF_IM:2:tmp/cache/U_0001C.NTF' )
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL','11'),
('NITF_IALVL','2'),
('NITF_ILOC_ROW','360'),
('NITF_ILOC_COLUMN','40'),
('NITF_CCS_ROW','422'),
('NITF_CCS_COLUMN','210'),
]
for item in tab:
if md[item[0]] != item[1]:
gdaltest.post_reason( '(2) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]) )
return 'fail'
ds = gdal.Open( 'NITF_IM:3:tmp/cache/U_0001C.NTF' )
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL','5'),
('NITF_IALVL','3'),
('NITF_ILOC_ROW','40'),
('NITF_ILOC_COLUMN','240'),
('NITF_CCS_ROW','-1'),
('NITF_CCS_COLUMN','-1'),
]
for item in tab:
if md[item[0]] != item[1]:
gdaltest.post_reason( '(3) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]) )
return 'fail'
ds = gdal.Open( 'NITF_IM:4:tmp/cache/U_0001C.NTF' )
md = ds.GetMetadata()
ds = None
tab = [
('NITF_IDLVL','1'),
('NITF_IALVL','0'),
('NITF_ILOC_ROW','65'),
('NITF_ILOC_COLUMN','30'),
('NITF_CCS_ROW','65'),
('NITF_CCS_COLUMN','30'),
]
for item in tab:
if md[item[0]] != item[1]:
gdaltest.post_reason( '(4) wrong value for %s, got %s instead of %s.'
% (item[0], md[item[0]], item[1]) )
return 'fail'
return 'success'
###############################################################################
# Test reading an M4 compressed file (fixed for #3848)
def nitf_online_23():
if not gdaltest.download_file('http://download.osgeo.org/gdal/data/nitf/nitf2.0/U_3058b.ntf', 'U_3058b.ntf'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/U_3058b.ntf', 1, 44748, filename_absolute = 1 )
return tst.testOpen()
###############################################################################
# Test reading ECRG frames
def nitf_online_24():
if not gdaltest.download_file('http://www.falconview.org/trac/FalconView/downloads/17', 'ECRG_Sample.zip'):
return 'skip'
try:
os.stat('tmp/cache/ECRG_Sample.zip')
except:
return 'skip'
oldval = gdal.GetConfigOption('NITF_OPEN_UNDERLYING_DS')
gdal.SetConfigOption('NITF_OPEN_UNDERLYING_DS', 'NO')
ds = gdal.Open('/vsizip/tmp/cache/ECRG_Sample.zip/ECRG_Sample/EPF/clfc/2/000000009s0013.lf2')
gdal.SetConfigOption('NITF_OPEN_UNDERLYING_DS', oldval)
if ds is None:
return 'fail'
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
if xml_tre.find('<tre name="GEOPSB"') == -1 or \
xml_tre.find('<tre name="J2KLRA"') == -1 or \
xml_tre.find('<tre name="GEOLOB"') == -1 or \
xml_tre.find('<tre name="BNDPLB"') == -1 or \
xml_tre.find('<tre name="ACCPOB"') == -1 or \
xml_tre.find('<tre name="SOURCB"') == -1:
gdaltest.post_reason('did not get expected xml:TRE')
print(xml_tre)
return 'fail'
return 'success'
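###############################################################################
# The save/set/restore sequence around NITF_OPEN_UNDERLYING_DS above recurs in
# several of these tests. The helper below is only an illustrative sketch of
# how that pattern could be factored out; it is not used by the tests
# themselves and relies only on gdal.GetConfigOption() / gdal.SetConfigOption().
import contextlib
@contextlib.contextmanager
def nitf_temp_config_option(key, value):
    # Remember the previous value, override it for the duration of the block,
    # and restore it even if the body raises.
    oldval = gdal.GetConfigOption(key)
    gdal.SetConfigOption(key, value)
    try:
        yield
    finally:
        gdal.SetConfigOption(key, oldval)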
###############################################################################
# Test reading an HRE file
def nitf_online_25():
if not gdaltest.download_file('http://www.gwg.nga.mil/ntb/baseline/docs/HRE_spec/Case1_HRE10G324642N1170747W_Uxx.hr5', 'Case1_HRE10G324642N1170747W_Uxx.hr5'):
return 'skip'
tst = gdaltest.GDALTest( 'NITF', 'tmp/cache/Case1_HRE10G324642N1170747W_Uxx.hr5', 1, 7099, filename_absolute = 1 )
ret = tst.testOpen()
if ret != 'success':
return ret
ds = gdal.Open('tmp/cache/Case1_HRE10G324642N1170747W_Uxx.hr5')
xml_tre = ds.GetMetadata('xml:TRE')[0]
ds = None
if xml_tre.find('<tre name="PIAPRD"') == -1:
gdaltest.post_reason('did not get expected xml:TRE')
print(xml_tre)
return 'fail'
return 'success'
###############################################################################
# Cleanup.
def nitf_cleanup():
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/test_create.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf9.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/test_13.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/test_29.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/test_29_copy.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf36.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf37.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf38.ntf' )
os.unlink( 'tmp/nitf38.ntf_0.ovr' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf39.ntf' )
except:
pass
try:
os.stat( 'tmp/nitf40.ntf' )
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf40.ntf' )
except:
pass
try:
os.stat( 'tmp/nitf42.ntf' )
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf42.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf44.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf45.ntf' )
os.unlink( 'tmp/nitf45.ntf_0.ovr' )
except:
pass
try:
os.stat( 'tmp/nitf46.ntf' )
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf46.ntf' )
os.unlink( 'tmp/nitf46.ntf_0.ovr' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf49.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf49_2.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf50.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf51.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf52.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf53.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf54.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf55.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf56.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf57.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf58.ntf' )
except:
pass
try:
os.remove('tmp/nitf59.hdr')
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf59.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf62.ntf' )
except:
pass
try:
gdal.GetDriverByName('NITF').Delete( 'tmp/nitf63.ntf' )
except:
pass
return 'success'
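###############################################################################
# The cleanup above repeats the same try/except block for every temporary
# file. A loop-based variant such as this sketch would keep the behaviour
# identical while being much shorter; it is shown for illustration only and is
# not registered in gdaltest_list (the file names below are just a subset).
def nitf_cleanup_sketch():
    for name in ['tmp/test_create.ntf', 'tmp/nitf9.ntf', 'tmp/test_13.ntf']:
        try:
            # Delete() removes the dataset and any driver-created side files.
            gdal.GetDriverByName('NITF').Delete(name)
        except:
            pass
    return 'success'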
gdaltest_list = [
nitf_1,
nitf_2,
nitf_3,
nitf_4,
nitf_5,
nitf_6,
nitf_7,
nitf_8,
nitf_9,
nitf_10,
nitf_11,
nitf_12,
nitf_13,
nitf_14,
nitf_15,
nitf_16,
nitf_17,
nitf_18,
nitf_19,
nitf_20,
nitf_21,
nitf_22,
nitf_23,
nitf_24,
nitf_25,
nitf_26,
nitf_27,
nitf_28_jp2ecw,
nitf_28_jp2mrsid,
nitf_28_jp2kak,
nitf_29,
nitf_30,
nitf_31,
nitf_32,
nitf_33,
nitf_34,
nitf_35,
nitf_36,
nitf_37,
nitf_38,
nitf_39,
nitf_40,
nitf_41,
nitf_42,
nitf_43_jasper,
nitf_43_jp2ecw,
nitf_43_jp2kak,
nitf_44,
nitf_45,
#nitf_46_jp2ecw,
#nitf_46_jp2mrsid,
#nitf_46_jp2kak,
nitf_46_jasper,
#nitf_46_openjpeg,
nitf_47,
nitf_48,
nitf_49,
nitf_50,
nitf_51,
nitf_52,
nitf_53,
nitf_54,
nitf_55,
nitf_56,
nitf_57,
nitf_58,
nitf_59,
nitf_60,
nitf_61,
nitf_62,
nitf_63,
nitf_64,
nitf_65,
nitf_66,
nitf_67,
nitf_68,
nitf_69,
nitf_70,
nitf_71,
nitf_online_1,
nitf_online_2,
nitf_online_3,
nitf_online_4,
nitf_online_5,
nitf_online_6,
nitf_online_7,
nitf_online_8,
nitf_online_9,
nitf_online_10,
nitf_online_11,
nitf_online_12,
nitf_online_13,
nitf_online_14,
nitf_online_15_jp2ecw,
nitf_online_15_jp2mrsid,
nitf_online_15_jp2kak,
nitf_online_15_jasper,
nitf_online_15_openjpeg,
nitf_online_16_jp2ecw,
nitf_online_16_jp2mrsid,
nitf_online_16_jp2kak,
nitf_online_16_jasper,
nitf_online_16_openjpeg,
nitf_online_17_jp2ecw,
nitf_online_17_jp2mrsid,
nitf_online_17_jp2kak,
nitf_online_17_jasper,
nitf_online_17_openjpeg,
nitf_online_18,
nitf_online_19,
nitf_online_20,
nitf_online_21,
nitf_online_22,
nitf_online_23,
nitf_online_24,
nitf_online_25,
nitf_cleanup ]
if __name__ == '__main__':
gdaltest.setup_run( 'nitf' )
gdaltest.run_tests( gdaltest_list )
gdaltest.summarize()
|
gpl-2.0
|
klahnakoski/TestFailures
|
pyLibrary/queries/query.py
|
1
|
25581
|
# encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski ([email protected])
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from collections import Mapping
from copy import copy
from pyLibrary import convert
from pyLibrary.collections import AND, UNION
from pyLibrary.debugs.logs import Log
from pyLibrary.dot import coalesce, Null, set_default, unwraplist, literal_field
from pyLibrary.dot import wrap, unwrap, listwrap
from pyLibrary.dot.dicts import Dict
from pyLibrary.dot.lists import DictList
from pyLibrary.maths import Math
from pyLibrary.meta import use_settings
from pyLibrary.queries import Schema, wrap_from
from pyLibrary.queries.containers import Container, STRUCT
from pyLibrary.queries.dimensions import Dimension
from pyLibrary.queries.domains import Domain, is_keyword, SetDomain
from pyLibrary.queries.expressions import jx_expression, TrueOp, Expression, FalseOp, Variable, LeavesOp, ScriptOp, OffsetOp
DEFAULT_LIMIT = 10
MAX_LIMIT = 50000
_jx = None
_Column = None
def _late_import():
global _jx
global _Column
from pyLibrary.queries.meta import Column as _Column
from pyLibrary.queries import jx as _jx
_ = _jx
_ = _Column
class QueryOp(Expression):
__slots__ = ["frum", "select", "edges", "groupby", "where", "window", "sort", "limit", "having", "format", "isLean"]
def __new__(cls, op, frum, select=None, edges=None, groupby=None, window=None, where=None, sort=None, limit=None, format=None):
output = object.__new__(cls)
for s in QueryOp.__slots__:
setattr(output, s, None)
return output
def __init__(self, op, frum, select=None, edges=None, groupby=None, window=None, where=None, sort=None, limit=None, format=None):
if isinstance(frum, Container):
pass
else:
Expression.__init__(self, op, frum)
self.frum = frum
self.select = select
self.edges = edges
self.groupby = groupby
self.window = window
self.where = where
self.sort = sort
self.limit = limit
self.format = format
def to_sql(self, not_null=False, boolean=False):
raise Log.error("{{type}} has no `to_sql` method", type=self.__class__.__name__)
def to_dict(self):
def select_to_dict():
if isinstance(self.select, list):
return [s.to_dict() for s in self.select]
else:
return self.select.to_dict()
return {
"from": self.frum.to_dict(),
"select": select_to_dict(),
"edges": [e.to_dict() for e in self.edges],
"groupby": [g.to_dict() for g in self.groupby],
"window": [w.to_dict() for w in self.window],
"where": self.where.to_dict(),
"sort": self.sort.to_dict(),
"limit": self.limit.to_dict()
}
def __json__(self):
return convert.value2json(self.to_dict())
def vars(self, exclude_where=False, exclude_select=False):
"""
:return: variables in query
"""
def edges_get_all_vars(e):
output = set()
if isinstance(e.value, basestring):
output.add(e.value)
if e.domain.key:
output.add(e.domain.key)
if e.domain.where:
output |= jx_expression(e.domain.where).vars()
if e.range:
output |= jx_expression(e.range.min).vars()
output |= jx_expression(e.range.max).vars()
if e.domain.partitions:
for p in e.domain.partitions:
if p.where:
output |= p.where.vars()
return output
output = set()
try:
output |= self.frum.vars()
except Exception:
pass
if not exclude_select:
for s in listwrap(self.select):
output |= s.value.vars()
for s in listwrap(self.edges):
output |= edges_get_all_vars(s)
for s in listwrap(self.groupby):
output |= edges_get_all_vars(s)
if not exclude_where:
output |= self.where.vars()
for s in listwrap(self.sort):
output |= s.value.vars()
try:
output |= UNION(e.vars() for e in self.window)
except Exception:
pass
return output
def map(self, map_):
def map_select(s, map_):
return set_default(
{"value": s.value.map(map_)},
s
)
def map_edge(e, map_):
partitions = unwraplist([
set_default(
{"where": p.where.map(map_)},
p
)
for p in e.domain.partitions
])
domain = copy(e.domain)
domain.where = e.domain.where.map(map_)
domain.partitions = partitions
edge = copy(e)
edge.value = e.value.map(map_)
edge.domain = domain
if e.range:
edge.range.min = e.range.min.map(map_)
edge.range.max = e.range.max.map(map_)
return edge
if isinstance(self.select, list):
select = wrap([map_select(s, map_) for s in self.select])
else:
select = map_select(self.select, map_)
return QueryOp(
"from",
frum=self.frum.map(map_),
select=select,
edges=wrap([map_edge(e, map_) for e in self.edges]),
groupby=wrap([g.map(map_) for g in self.groupby]),
window=wrap([w.map(map_) for w in self.window]),
where=self.where.map(map_),
sort=wrap([map_select(s, map_) for s in listwrap(self.sort)]),
limit=self.limit,
format=self.format
)
def missing(self):
return FalseOp()
@staticmethod
def wrap(query, schema=None):
"""
NORMALIZE QUERY SO IT CAN STILL BE JSON
"""
if isinstance(query, QueryOp) or query == None:
return query
query = wrap(query)
output = QueryOp("from", None)
output.format = query.format
output.frum = wrap_from(query["from"], schema=schema)
if not schema and isinstance(output.frum, Schema):
schema = output.frum
if query.select or isinstance(query.select, (Mapping, list)):
output.select = _normalize_selects(query.select, query.frum, schema=schema)
else:
if query.edges or query.groupby:
output.select = Dict(name="count", value=jx_expression("."), aggregate="count", default=0)
else:
output.select = _normalize_selects(".", query.frum)
if query.groupby and query.edges:
Log.error("You can not use both the `groupby` and `edges` clauses in the same query!")
elif query.edges:
output.edges = _normalize_edges(query.edges, schema=schema)
output.groupby = Null
elif query.groupby:
output.edges = Null
output.groupby = _normalize_groupby(query.groupby, schema=schema)
else:
output.edges = Null
output.groupby = Null
output.where = _normalize_where(query.where, schema=schema)
output.window = [_normalize_window(w) for w in listwrap(query.window)]
output.having = None
output.sort = _normalize_sort(query.sort)
output.limit = Math.min(MAX_LIMIT, coalesce(query.limit, DEFAULT_LIMIT))
if not Math.is_integer(output.limit) or output.limit < 0:
Log.error("Expecting limit >= 0")
output.isLean = query.isLean
return output
@property
def columns(self):
return listwrap(self.select) + coalesce(self.edges, self.groupby)
@property
def query_path(self):
return "."
@property
def column_names(self):
return listwrap(self.select).name + self.edges.name + self.groupby.name
def __getitem__(self, item):
if item == "from":
return self.frum
return Dict.__getitem__(self, item)
def copy(self):
output = object.__new__(QueryOp)
for s in QueryOp.__slots__:
setattr(output, s, getattr(self, s))
return output
def as_dict(self):
output = wrap({s: getattr(self, s) for s in QueryOp.__slots__})
return output
canonical_aggregates = wrap({
"count": {"name": "count", "default": 0},
"min": {"name": "minimum"},
"max": {"name": "maximum"},
"add": {"name": "sum"},
"avg": {"name": "average"},
"mean": {"name": "average"},
})
def _normalize_selects(selects, frum, schema=None, ):
if frum == None or isinstance(frum, (list, set, unicode)):
if isinstance(selects, list):
if len(selects) == 0:
output = Dict()
return output
else:
output = [_normalize_select_no_context(s, schema=schema) for s in selects]
else:
return _normalize_select_no_context(selects)
elif isinstance(selects, list):
output = [ss for s in selects for ss in _normalize_select(s, frum=frum, schema=schema)]
else:
output = _normalize_select(selects, frum, schema=schema)
exists = set()
for s in output:
if s.name in exists:
Log.error("{{name}} has already been defined", name=s.name)
exists.add(s.name)
return output
def _normalize_select(select, frum, schema=None):
"""
:param select: ONE SELECT COLUMN
:param frum: TABLE TO get_columns()
:param schema: SCHEMA TO LOOKUP NAMES FOR DEFINITIONS
:return: AN ARRAY OF SELECT COLUMNS
"""
if not _Column:
_late_import()
if isinstance(select, basestring):
canonical = select = Dict(value=select)
else:
select = wrap(select)
canonical = select.copy()
canonical.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
canonical.default = coalesce(select.default, canonical_aggregates[canonical.aggregate].default)
if hasattr(unwrap(frum), "_normalize_select"):
return frum._normalize_select(canonical)
output = []
if not select.value or select.value == ".":
output.extend([
set_default(
{
"name": c.name,
"value": jx_expression(c.name)
},
canonical
)
for c in frum.get_leaves()
])
elif isinstance(select.value, basestring):
if select.value.endswith(".*"):
base_name = select.value[:-2]
canonical.name = coalesce(select.name, base_name, select.aggregate)
            value = jx_expression(select.value[:-2])
if not isinstance(value, Variable):
Log.error("`*` over general expression not supported yet")
output.append([
set_default(
{
"name": base_name,
"value": LeavesOp("leaves", value),
"format": "dict" # MARKUP FOR DECODING
},
canonical
)
for c in frum.get_columns()
if c.type not in STRUCT
])
else:
output.extend([
set_default(
{
"name": base_name + "." + literal_field(c.name[len(base_name) + 1:]),
"value": jx_expression(c.name)
},
canonical
)
for c in frum.get_leaves()
if c.name.startswith(base_name+".")
])
else:
canonical.name = coalesce(select.name, select.value, select.aggregate)
canonical.value = jx_expression(select.value)
output.append(canonical)
output = wrap(output)
if any(n==None for n in output.name):
Log.error("expecting select to have a name: {{select}}", select=select)
return output
def _normalize_select_no_context(select, schema=None):
"""
SAME NORMALIZE, BUT NO SOURCE OF COLUMNS
"""
if not _Column:
_late_import()
if isinstance(select, basestring):
select = Dict(value=select)
else:
select = wrap(select)
output = select.copy()
if not select.value:
output.name = coalesce(select.name, select.aggregate)
if output.name:
output.value = jx_expression(".")
else:
return output
elif isinstance(select.value, basestring):
if select.value.endswith(".*"):
output.name = coalesce(select.name, select.value[:-2], select.aggregate)
output.value = LeavesOp("leaves", Variable(select.value[:-2]))
else:
if select.value == ".":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = jx_expression(select.value)
elif select.value == "*":
output.name = coalesce(select.name, select.aggregate, ".")
output.value = LeavesOp("leaves", Variable("."))
else:
output.name = coalesce(select.name, select.value, select.aggregate)
output.value = jx_expression(select.value)
else:
output.value = jx_expression(select.value)
if not output.name:
Log.error("expecting select to have a name: {{select}}", select= select)
if output.name.endswith(".*"):
Log.error("{{name|quote}} is invalid select", name=output.name)
output.aggregate = coalesce(canonical_aggregates[select.aggregate].name, select.aggregate, "none")
output.default = coalesce(select.default, canonical_aggregates[output.aggregate].default)
return output
def _normalize_edges(edges, schema=None):
return wrap([_normalize_edge(e, schema=schema) for e in listwrap(edges)])
def _normalize_edge(edge, schema=None):
if not _Column:
_late_import()
if edge == None:
Log.error("Edge has no value, or expression is empty")
elif isinstance(edge, basestring):
if schema:
try:
e = schema[edge]
except Exception, e:
e = None
e = unwraplist(e)
if e and not isinstance(e, (_Column, set, list)):
if isinstance(e, _Column):
return Dict(
name=edge,
value=jx_expression(edge),
allowNulls=True,
domain=_normalize_domain(domain=e, schema=schema)
)
elif isinstance(e.fields, list) and len(e.fields) == 1:
return Dict(
name=e.name,
value=jx_expression(e.fields[0]),
allowNulls=True,
domain=e.getDomain()
)
else:
return Dict(
name=e.name,
allowNulls=True,
domain=e.getDomain()
)
return Dict(
name=edge,
value=jx_expression(edge),
allowNulls=True,
domain=_normalize_domain(schema=schema)
)
else:
edge = wrap(edge)
if not edge.name and not isinstance(edge.value, basestring):
Log.error("You must name compound and complex edges: {{edge}}", edge=edge)
if isinstance(edge.value, (list, set)) and not edge.domain:
# COMPLEX EDGE IS SHORT HAND
domain = _normalize_domain(schema=schema)
domain.dimension = Dict(fields=edge.value)
return Dict(
name=edge.name,
value=jx_expression(edge.value),
allowNulls=bool(coalesce(edge.allowNulls, True)),
domain=domain
)
domain = _normalize_domain(edge.domain, schema=schema)
return Dict(
name=coalesce(edge.name, edge.value),
value=jx_expression(edge.value),
range=_normalize_range(edge.range),
allowNulls=bool(coalesce(edge.allowNulls, True)),
domain=domain
)
def _normalize_groupby(groupby, schema=None):
if groupby == None:
return None
output = wrap([_normalize_group(e, schema=schema) for e in listwrap(groupby)])
if any(o==None for o in output):
Log.error("not expected")
return output
def _normalize_group(edge, schema=None):
if isinstance(edge, basestring):
return wrap({
"name": edge,
"value": jx_expression(edge),
"allowNulls": False,
"domain": {"type": "default"}
})
else:
edge = wrap(edge)
if (edge.domain and edge.domain.type != "default") or edge.allowNulls != None:
Log.error("groupby does not accept complicated domains")
if not edge.name and not isinstance(edge.value, basestring):
Log.error("You must name compound edges: {{edge}}", edge= edge)
return wrap({
"name": coalesce(edge.name, edge.value),
"value": jx_expression(edge.value),
"allowNulls": True,
"domain": {"type": "default"}
})
def _normalize_domain(domain=None, schema=None):
if not domain:
return Domain(type="default")
elif isinstance(domain, _Column):
if domain.partitions:
return SetDomain(**domain)
elif isinstance(domain, Dimension):
return domain.getDomain()
elif schema and isinstance(domain, basestring) and schema[domain]:
return schema[domain].getDomain()
elif isinstance(domain, Domain):
return domain
if not domain.name:
domain = domain.copy()
domain.name = domain.type
return Domain(**domain)
def _normalize_window(window, schema=None):
v = window.value
try:
expr = jx_expression(v)
except Exception:
expr = ScriptOp("script", v)
return Dict(
name=coalesce(window.name, window.value),
value=expr,
edges=[_normalize_edge(e, schema) for e in listwrap(window.edges)],
sort=_normalize_sort(window.sort),
aggregate=window.aggregate,
range=_normalize_range(window.range),
where=_normalize_where(window.where, schema=schema)
)
def _normalize_range(range):
if range == None:
return None
return Dict(
min=None if range.min == None else jx_expression(range.min),
max=None if range.max == None else jx_expression(range.max),
mode=range.mode
)
def _normalize_where(where, schema=None):
if where == None:
return TrueOp()
return jx_expression(where)
def _map_term_using_schema(master, path, term, schema_edges):
"""
IF THE WHERE CLAUSE REFERS TO FIELDS IN THE SCHEMA, THEN EXPAND THEM
"""
output = DictList()
for k, v in term.items():
dimension = schema_edges[k]
if isinstance(dimension, Dimension):
domain = dimension.getDomain()
if dimension.fields:
if isinstance(dimension.fields, Mapping):
# EXPECTING A TUPLE
for local_field, es_field in dimension.fields.items():
local_value = v[local_field]
if local_value == None:
output.append({"missing": {"field": es_field}})
else:
output.append({"term": {es_field: local_value}})
continue
if len(dimension.fields) == 1 and is_keyword(dimension.fields[0]):
# SIMPLE SINGLE-VALUED FIELD
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if AND(is_keyword(f) for f in dimension.fields):
# EXPECTING A TUPLE
if not isinstance(v, tuple):
Log.error("expecing {{name}}={{value}} to be a tuple", name= k, value= v)
for i, f in enumerate(dimension.fields):
vv = v[i]
if vv == None:
output.append({"missing": {"field": f}})
else:
output.append({"term": {f: vv}})
continue
if len(dimension.fields) == 1 and is_keyword(dimension.fields[0]):
if domain.getPartByKey(v) is domain.NULL:
output.append({"missing": {"field": dimension.fields[0]}})
else:
output.append({"term": {dimension.fields[0]: v}})
continue
if domain.partitions:
part = domain.getPartByKey(v)
if part is domain.NULL or not part.esfilter:
Log.error("not expected to get NULL")
output.append(part.esfilter)
continue
else:
Log.error("not expected")
elif isinstance(v, Mapping):
sub = _map_term_using_schema(master, path + [k], v, schema_edges[k])
output.append(sub)
continue
output.append({"term": {k: v}})
return {"and": output}
def _where_terms(master, where, schema):
"""
USE THE SCHEMA TO CONVERT DIMENSION NAMES TO ES FILTERS
master - TOP LEVEL WHERE (FOR PLACING NESTED FILTERS)
"""
if isinstance(where, Mapping):
if where.term:
# MAP TERM
try:
output = _map_term_using_schema(master, [], where.term, schema.edges)
return output
except Exception, e:
Log.error("programmer problem?", e)
elif where.terms:
# MAP TERM
output = DictList()
for k, v in where.terms.items():
if not isinstance(v, (list, set)):
Log.error("terms filter expects list of values")
edge = schema.edges[k]
if not edge:
output.append({"terms": {k: v}})
else:
if isinstance(edge, basestring):
# DIRECT FIELD REFERENCE
return {"terms": {edge: v}}
try:
domain = edge.getDomain()
except Exception, e:
Log.error("programmer error", e)
fields = domain.dimension.fields
if isinstance(fields, Mapping):
or_agg = []
for vv in v:
and_agg = []
for local_field, es_field in fields.items():
vvv = vv[local_field]
if vvv != None:
and_agg.append({"term": {es_field: vvv}})
or_agg.append({"and": and_agg})
output.append({"or": or_agg})
elif isinstance(fields, list) and len(fields) == 1 and is_keyword(fields[0]):
output.append({"terms": {fields[0]: v}})
elif domain.partitions:
output.append({"or": [domain.getPartByKey(vv).esfilter for vv in v]})
return {"and": output}
elif where["or"]:
return {"or": [unwrap(_where_terms(master, vv, schema)) for vv in where["or"]]}
elif where["and"]:
return {"and": [unwrap(_where_terms(master, vv, schema)) for vv in where["and"]]}
elif where["not"]:
return {"not": unwrap(_where_terms(master, where["not"], schema))}
return where
def _normalize_sort(sort=None):
"""
CONVERT SORT PARAMETERS TO A NORMAL FORM SO EASIER TO USE
"""
if sort==None:
return DictList.EMPTY
output = DictList()
for s in listwrap(sort):
if isinstance(s, basestring):
output.append({"value": jx_expression(s), "sort": 1})
elif isinstance(s, Expression):
output.append({"value": s, "sort": 1})
elif Math.is_integer(s):
output.append({"value": OffsetOp("offset", s), "sort": 1})
elif all(d in sort_direction for d in s.values()) and not s.sort and not s.value:
for v, d in s.items():
output.append({"value": jx_expression(v), "sort": -1})
else:
output.append({"value": jx_expression(coalesce(s.value, s.field)), "sort": coalesce(sort_direction[s.sort], 1)})
return output
sort_direction = {
"asc": 1,
"desc": -1,
"none": 0,
1: 1,
0: 0,
-1: -1,
None: 1,
Null: 1
}
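# A usage sketch (comments only, since it needs a live container behind
# wrap_from()): QueryOp.wrap() accepts a plain JSON-style Mapping and returns a
# QueryOp whose slots are normalized. The container name "unittest" and the
# field names below are made up for illustration.
#
#     q = QueryOp.wrap({
#         "from": "unittest",
#         "select": {"value": "result.duration", "aggregate": "avg"},
#         "groupby": ["build.platform"],
#         "where": {"eq": {"result.ok": "F"}},
#         "limit": 100
#     })
#
# After wrapping, q.where is a jx expression, q.limit is capped at MAX_LIMIT,
# and the "avg" aggregate has been canonicalized to "average" via
# canonical_aggregates.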
|
mpl-2.0
|
rekbun/browserscope
|
test/test_decorators.py
|
9
|
1678
|
#!/usr/bin/python2.5
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Shared Models Unit Tests."""
__author__ = '[email protected] (Lindsey Simon)'
import unittest
import logging
from google.appengine.ext import db
from django.test.client import Client
from django import http
import urls
import settings
from base import decorators
class TestDecorators(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_provide_csrf(self):
params = {
'return_csrf': 0
}
response = self.client.get('/get_csrf', params)
self.assertEquals(response.content, 'True')
def test_check_csrf_with_token(self):
params = {
'csrf_token': self.client.get('/get_csrf').content
}
response = self.client.get('/fake_check_csrf', params)
self.assertEquals(200, response.status_code)
# Second time shouldn't work with the same token.
response = self.client.get('/fake_check_csrf', params)
self.assertNotEquals(200, response.status_code)
def test_check_csrf_without_token(self):
response = self.client.get('/fake_check_csrf')
self.assertNotEquals(200, response.status_code)
|
apache-2.0
|
30loops/libthirty
|
libthirty/state.py
|
1
|
4469
|
# Copyright (c) 2011-2012, 30loops.net
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of 30loops.net nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL 30loops.net BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Environment dictionary - support structures"""
class _AttributeDict(dict):
"""
Dictionary subclass enabling attribute lookup/assignment of keys/values.
For example::
>>> m = _AttributeDict({'foo': 'bar'})
>>> m.foo
'bar'
>>> m.foo = 'not bar'
>>> m['foo']
'not bar'
``_AttributeDict`` objects also provide ``.first()`` which acts like
``.get()`` but accepts multiple keys as arguments, and returns the value of
the first hit, e.g.::
>>> m = _AttributeDict({'foo': 'bar', 'biz': 'baz'})
>>> m.first('wrong', 'incorrect', 'foo', 'biz')
'bar'
"""
def __getattr__(self, key):
try:
return self[key]
except KeyError:
# to conform with __getattr__ spec
raise AttributeError(key)
def __setattr__(self, key, value):
self[key] = value
def first(self, *names):
for name in names:
value = self.get(name)
if value:
return value
# Global config dictionary. Stores the global state.
env = _AttributeDict({
'base_uri': 'https://api.30loops.net',
'api_version': '0.9',
'account': None,
'service': None,
'appname': None,
'username': None,
'password': None
})
def uri(
base_uri=None,
api_version=None,
account=None):
"""Compose the base uri."""
if not base_uri:
base_uri = env.base_uri
if not api_version:
api_version = env.api_version
if not account:
account = env.account
path = []
path.append(api_version.strip('/'))
if not isinstance(account, type(None)):
path.append(account.strip('/'))
return "%s/%s" % (base_uri.strip('/'), '/'.join(path))
def app_uri(
base_uri=None,
api_version=None,
account=None,
appname=None):
"""Compose the app uri."""
if not appname:
appname = env.appname
return "%s/apps/%s" % (uri(base_uri, api_version, account), appname)
def service_uri(
base_uri=None,
api_version=None,
account=None,
appname=None,
service=None):
"""Compose as service uri."""
if not service:
service = env.service
return "%s/services/%s" % (app_uri(base_uri, api_version, account, appname),
service)
def resource_collection_uri(
base_uri=None,
api_version=None,
account=None,
label=None):
"""Return the URI of a resource as a string."""
if not base_uri:
base_uri = env.base_uri
if not api_version:
api_version = env.api_version
if not account:
account = env.account
if not label:
label = env.label
path = []
path.append(api_version.strip('/'))
path.append(account.strip('/'))
path.append(label.strip('/'))
return "%s/%s" % (base_uri.strip('/'), '/'.join(path))
|
bsd-3-clause
|
clems/openfisca
|
src/views/ui_page02.py
|
1
|
22628
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui/page02.ui'
#
# Created: Tue May 29 19:14:52 2012
# by: PyQt4 UI code generator 4.8.5
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
_fromUtf8 = lambda s: s
class Ui_Page02(object):
def setupUi(self, Page02):
Page02.setObjectName(_fromUtf8("Page02"))
Page02.resize(730, 564)
Page02.setMinimumSize(QtCore.QSize(730, 0))
Page02.setMaximumSize(QtCore.QSize(730, 16777215))
Page02.setWindowTitle(QtGui.QApplication.translate("Page02", "Form", None, QtGui.QApplication.UnicodeUTF8))
Page02.setStyleSheet(_fromUtf8(""))
self.gridLayout_2 = QtGui.QGridLayout(Page02)
self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2"))
self.label_2 = QtGui.QLabel(Page02)
self.label_2.setStyleSheet(_fromUtf8(""))
self.label_2.setText(QtGui.QApplication.translate("Page02", "A| SITUATION DU FOYER FISCAL", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setProperty("class", QtGui.QApplication.translate("Page02", "titreA", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout_2.addWidget(self.label_2, 0, 0, 1, 1)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.label_81 = QtGui.QLabel(Page02)
self.label_81.setText(QtGui.QApplication.translate("Page02", "Situation pouvant donner droit à une demi-part supplémentaire", None, QtGui.QApplication.UnicodeUTF8))
self.label_81.setProperty("class", QtGui.QApplication.translate("Page02", "titreB", None, QtGui.QApplication.UnicodeUTF8))
self.label_81.setObjectName(_fromUtf8("label_81"))
self.gridLayout.addWidget(self.label_81, 0, 0, 1, 1)
self.label_12 = QtGui.QLabel(Page02)
self.label_12.setStyleSheet(_fromUtf8(""))
self.label_12.setText(QtGui.QApplication.translate("Page02", "1. En cas de célibat, divorce, séparation, veuvage", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setProperty("class", QtGui.QApplication.translate("Page02", "titreC", None, QtGui.QApplication.UnicodeUTF8))
self.label_12.setObjectName(_fromUtf8("label_12"))
self.gridLayout.addWidget(self.label_12, 1, 0, 1, 1)
self.label_13 = QtGui.QLabel(Page02)
self.label_13.setText(QtGui.QApplication.translate("Page02", "- Vous vivez seule, et : ", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_13.setObjectName(_fromUtf8("label_13"))
self.gridLayout.addWidget(self.label_13, 2, 0, 1, 1)
self.label_14 = QtGui.QLabel(Page02)
self.label_14.setText(QtGui.QApplication.translate("Page02", "- vos enfant (majeurs ou mariés/pacsés; mineurs imposés en leur nom propre) ne sont pas comptés \n"
"à votre charge ou n\'ont pas demandé le rattachement à votre foyer", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_14.setObjectName(_fromUtf8("label_14"))
self.gridLayout.addWidget(self.label_14, 3, 0, 1, 1)
self.label_30 = QtGui.QLabel(Page02)
self.label_30.setMinimumSize(QtCore.QSize(27, 20))
self.label_30.setMaximumSize(QtCore.QSize(27, 20))
self.label_30.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_30.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">E</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_30.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_30.setObjectName(_fromUtf8("label_30"))
self.gridLayout.addWidget(self.label_30, 3, 2, 1, 1)
self.caseE = QtGui.QCheckBox(Page02)
self.caseE.setText(_fromUtf8(""))
self.caseE.setObjectName(_fromUtf8("caseE"))
self.gridLayout.addWidget(self.caseE, 3, 3, 1, 1)
self.label_15 = QtGui.QLabel(Page02)
self.label_15.setText(QtGui.QApplication.translate("Page02", "- vous avez eu un enfant décédé après l’âge de 16 ans ou par suite de faits de guerre", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_15.setObjectName(_fromUtf8("label_15"))
self.gridLayout.addWidget(self.label_15, 4, 0, 1, 1)
self.label_31 = QtGui.QLabel(Page02)
self.label_31.setMinimumSize(QtCore.QSize(27, 20))
self.label_31.setMaximumSize(QtCore.QSize(27, 20))
self.label_31.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_31.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">K</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_31.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_31.setObjectName(_fromUtf8("label_31"))
self.gridLayout.addWidget(self.label_31, 4, 2, 1, 1)
self.caseK = QtGui.QCheckBox(Page02)
self.caseK.setText(_fromUtf8(""))
self.caseK.setObjectName(_fromUtf8("caseK"))
self.gridLayout.addWidget(self.caseK, 4, 3, 1, 1)
self.label_16 = QtGui.QLabel(Page02)
self.label_16.setText(QtGui.QApplication.translate("Page02", "- Vous vivez seul(e) et vous avez élevé vos enfants remplissant l\'une des conditions \n"
"ci-dessus pendant au moins cinq années au cours desquelles vous viviez seul(e) ", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_16.setObjectName(_fromUtf8("label_16"))
self.gridLayout.addWidget(self.label_16, 5, 0, 1, 1)
self.label_32 = QtGui.QLabel(Page02)
self.label_32.setMinimumSize(QtCore.QSize(27, 20))
self.label_32.setMaximumSize(QtCore.QSize(27, 20))
self.label_32.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_32.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">L</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_32.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_32.setObjectName(_fromUtf8("label_32"))
self.gridLayout.addWidget(self.label_32, 5, 2, 1, 1)
self.caseL = QtGui.QCheckBox(Page02)
self.caseL.setText(_fromUtf8(""))
self.caseL.setObjectName(_fromUtf8("caseL"))
self.gridLayout.addWidget(self.caseL, 5, 3, 1, 1)
self.label_17 = QtGui.QLabel(Page02)
self.label_17.setText(QtGui.QApplication.translate("Page02", "- Vous ne vivez pas seule", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout.addWidget(self.label_17, 6, 0, 1, 1)
self.label_33 = QtGui.QLabel(Page02)
self.label_33.setMinimumSize(QtCore.QSize(27, 20))
self.label_33.setMaximumSize(QtCore.QSize(27, 20))
self.label_33.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_33.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">N</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_33.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_33.setObjectName(_fromUtf8("label_33"))
self.gridLayout.addWidget(self.label_33, 6, 2, 1, 1)
self.caseN = QtGui.QCheckBox(Page02)
self.caseN.setText(_fromUtf8(""))
self.caseN.setObjectName(_fromUtf8("caseN"))
self.gridLayout.addWidget(self.caseN, 6, 3, 1, 1)
self.label_18 = QtGui.QLabel(Page02)
self.label_18.setText(QtGui.QApplication.translate("Page02", "2. Titulaire d\'une pension (militaire, accident du travail) pour une invalidité d\'au moins 40 % \n"
"ou d\'une carte d\'invalidité d\'au moins 80%", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setProperty("class", QtGui.QApplication.translate("Page02", "titreC", None, QtGui.QApplication.UnicodeUTF8))
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout.addWidget(self.label_18, 7, 0, 1, 1)
self.label_34 = QtGui.QLabel(Page02)
self.label_34.setMinimumSize(QtCore.QSize(27, 20))
self.label_34.setMaximumSize(QtCore.QSize(27, 20))
self.label_34.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_34.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">P</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_34.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_34.setObjectName(_fromUtf8("label_34"))
self.gridLayout.addWidget(self.label_34, 7, 2, 1, 1)
self.caseP = QtGui.QCheckBox(Page02)
self.caseP.setText(_fromUtf8(""))
self.caseP.setObjectName(_fromUtf8("caseP"))
self.gridLayout.addWidget(self.caseP, 7, 3, 1, 1)
self.label_19 = QtGui.QLabel(Page02)
self.label_19.setText(QtGui.QApplication.translate("Page02", "Votre conjoint remplit ces conditions, ou votre conjoint, décédé en 2009, remplissait ces conditions", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout.addWidget(self.label_19, 8, 0, 1, 1)
self.label_35 = QtGui.QLabel(Page02)
self.label_35.setMinimumSize(QtCore.QSize(27, 20))
self.label_35.setMaximumSize(QtCore.QSize(27, 20))
self.label_35.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_35.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">F</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_35.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_35.setObjectName(_fromUtf8("label_35"))
self.gridLayout.addWidget(self.label_35, 8, 2, 1, 1)
self.caseF = QtGui.QCheckBox(Page02)
self.caseF.setText(_fromUtf8(""))
self.caseF.setObjectName(_fromUtf8("caseF"))
self.gridLayout.addWidget(self.caseF, 8, 3, 1, 1)
self.label_20 = QtGui.QLabel(Page02)
self.label_20.setText(QtGui.QApplication.translate("Page02", "3. Titulaire de la carte du combattant ou d’une pension militaire d’invalidité ou de victime de guerre :", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setProperty("class", QtGui.QApplication.translate("Page02", "titreC", None, QtGui.QApplication.UnicodeUTF8))
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout.addWidget(self.label_20, 9, 0, 1, 2)
self.label_21 = QtGui.QLabel(Page02)
self.label_21.setText(QtGui.QApplication.translate("Page02", "Vous êtes célibataire, divorcé, séparé, veuf et :\n"
"– vous avez plus de 75 ans et vous remplissez ces conditions ;\n"
"– vous avez plus de 75 ans et votre conjoint, décédé, remplissait ces conditions ;\n"
"– votre conjoint, âgé de plus de 75 ans, décédé en 2009, remplissait ces conditions", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout.addWidget(self.label_21, 10, 0, 1, 1)
self.label_36 = QtGui.QLabel(Page02)
self.label_36.setMinimumSize(QtCore.QSize(27, 20))
self.label_36.setMaximumSize(QtCore.QSize(27, 20))
self.label_36.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_36.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">W</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_36.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_36.setObjectName(_fromUtf8("label_36"))
self.gridLayout.addWidget(self.label_36, 10, 2, 1, 1)
self.caseW = QtGui.QCheckBox(Page02)
self.caseW.setText(_fromUtf8(""))
self.caseW.setObjectName(_fromUtf8("caseW"))
self.gridLayout.addWidget(self.caseW, 10, 3, 1, 1)
self.label_22 = QtGui.QLabel(Page02)
self.label_22.setText(QtGui.QApplication.translate("Page02", "Vous avez une pension de veuve de guerre", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setProperty("class", QtGui.QApplication.translate("Page02", "texte01", None, QtGui.QApplication.UnicodeUTF8))
self.label_22.setObjectName(_fromUtf8("label_22"))
self.gridLayout.addWidget(self.label_22, 11, 0, 1, 1)
self.label_37 = QtGui.QLabel(Page02)
self.label_37.setMinimumSize(QtCore.QSize(27, 20))
self.label_37.setMaximumSize(QtCore.QSize(27, 20))
self.label_37.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_37.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">G</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_37.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.label_37.setObjectName(_fromUtf8("label_37"))
self.gridLayout.addWidget(self.label_37, 11, 2, 1, 1)
self.caseG = QtGui.QCheckBox(Page02)
self.caseG.setText(_fromUtf8(""))
self.caseG.setObjectName(_fromUtf8("caseG"))
self.gridLayout.addWidget(self.caseG, 11, 3, 1, 1)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem, 3, 1, 1, 1)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem1, 4, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem2, 5, 1, 1, 1)
spacerItem3 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem3, 6, 1, 1, 1)
spacerItem4 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem4, 7, 1, 1, 1)
spacerItem5 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem5, 8, 1, 1, 1)
spacerItem6 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem6, 10, 1, 1, 1)
spacerItem7 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout.addItem(spacerItem7, 11, 1, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout, 1, 0, 1, 2)
self.label_4 = QtGui.QLabel(Page02)
font = QtGui.QFont()
font.setPointSize(10)
self.label_4.setFont(font)
self.label_4.setStyleSheet(_fromUtf8(""))
self.label_4.setText(QtGui.QApplication.translate("Page02", "B| PARENT ISOLE", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setProperty("class", QtGui.QApplication.translate("Page02", "titreA", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout_2.addWidget(self.label_4, 2, 0, 1, 2)
self.gridLayout_10 = QtGui.QGridLayout()
self.gridLayout_10.setObjectName(_fromUtf8("gridLayout_10"))
self.label_25 = QtGui.QLabel(Page02)
self.label_25.setText(QtGui.QApplication.translate("Page02", "Vous êtes célibataire, divorcé(e), séparé(e) et vous vivez seul(e)\n"
"avec votre (ou vos) enfant(s) ou des personnes invalides recueillies sous votre toit", None, QtGui.QApplication.UnicodeUTF8))
self.label_25.setObjectName(_fromUtf8("label_25"))
self.gridLayout_10.addWidget(self.label_25, 0, 0, 1, 1)
spacerItem8 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.gridLayout_10.addItem(spacerItem8, 0, 1, 1, 1)
self.label_40 = QtGui.QLabel(Page02)
self.label_40.setMinimumSize(QtCore.QSize(27, 20))
self.label_40.setMaximumSize(QtCore.QSize(27, 20))
self.label_40.setStyleSheet(_fromUtf8("font: 75 10pt \"Arial\";"))
self.label_40.setText(QtGui.QApplication.translate("Page02", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'Arial\'; font-size:10pt; font-weight:72; font-style:normal;\">\n"
"<p align=\"right\" style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-weight:600;\">T</span></p></body></html>", None, QtGui.QApplication.UnicodeUTF8))
self.label_40.setObjectName(_fromUtf8("label_40"))
self.gridLayout_10.addWidget(self.label_40, 0, 2, 1, 1)
self.caseT = QtGui.QCheckBox(Page02)
self.caseT.setText(_fromUtf8(""))
self.caseT.setObjectName(_fromUtf8("caseT"))
self.gridLayout_10.addWidget(self.caseT, 0, 3, 1, 1)
self.gridLayout_2.addLayout(self.gridLayout_10, 3, 0, 1, 2)
spacerItem9 = QtGui.QSpacerItem(20, 38, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_2.addItem(spacerItem9, 4, 0, 1, 2)
self.retranslateUi(Page02)
QtCore.QMetaObject.connectSlotsByName(Page02)
Page02.setTabOrder(self.caseE, self.caseK)
Page02.setTabOrder(self.caseK, self.caseL)
Page02.setTabOrder(self.caseL, self.caseN)
Page02.setTabOrder(self.caseN, self.caseP)
Page02.setTabOrder(self.caseP, self.caseF)
Page02.setTabOrder(self.caseF, self.caseW)
Page02.setTabOrder(self.caseW, self.caseG)
def retranslateUi(self, Page02):
pass
|
gpl-3.0
|
kivatu/kivy_old
|
kivy/uix/checkbox.py
|
5
|
3103
|
'''
CheckBox
========
.. versionadded:: 1.4.0
.. image:: images/checkbox.png
:align: right
:class:`CheckBox` is a specific two-state button that can be either checked or
unchecked. If the CheckBox is in a Group, it becomes a Radio button.
As with the :class:`~kivy.uix.togglebutton.ToggleButton`, only one Radio button
at a time can be selected when the :data:`CheckBox.group` is set.
An example usage::
from kivy.uix.checkbox import CheckBox
# ...
def on_checkbox_active(checkbox, value):
if value:
print('The checkbox', checkbox, 'is active')
else:
print('The checkbox', checkbox, 'is inactive')
checkbox = CheckBox()
checkbox.bind(active=on_checkbox_active)
'''
__all__ = ('CheckBox', )
from weakref import ref
from kivy.uix.widget import Widget
from kivy.properties import BooleanProperty, ObjectProperty
class CheckBox(Widget):
    '''CheckBox class, see module documentation for more information.
'''
active = BooleanProperty(False)
'''Indicates if the switch is active or inactive.
:data:`active` is a :class:`~kivy.properties.BooleanProperty` and defaults
to False.
'''
__groups = {}
group = ObjectProperty(None, allownone=True)
'''Group of the checkbox. If None, no group will be used (the checkbox is
independent). If specified, the :data:`group` must be a hashable object
such as a string. Only one checkbox in a group can be active.
:data:`group` is an :class:`~kivy.properties.ObjectProperty` and defaults to
None.
'''
def __init__(self, **kwargs):
self._previous_group = None
super(CheckBox, self).__init__(**kwargs)
def on_group(self, *largs):
groups = CheckBox.__groups
if self._previous_group:
group = groups[self._previous_group]
for item in group[:]:
if item() is self:
group.remove(item)
break
group = self._previous_group = self.group
if group not in groups:
groups[group] = []
r = ref(self, CheckBox._clear_groups)
groups[group].append(r)
def _release_group(self, current):
if self.group is None:
return
group = self.__groups[self.group]
for item in group[:]:
widget = item()
            if widget is None:
                group.remove(item)
                continue
if widget is current:
continue
widget.active = False
def _toggle_active(self):
self._release_group(self)
self.active = not self.active
def on_touch_down(self, touch):
if not self.collide_point(*touch.pos):
return
if self.disabled:
return True
self._toggle_active()
return True
@staticmethod
def _clear_groups(wk):
# auto flush the element when the weak reference have been deleted
groups = CheckBox.__groups
for group in list(groups.values()):
if wk in group:
group.remove(wk)
break
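# ---------------------------------------------------------------------------
# Editor's note: the block below is a hedged usage sketch added for clarity;
# it is not part of the original module. It drives the grouping logic
# programmatically (via the internal `_toggle_active` helper used by the touch
# handler) to illustrate the radio-button behaviour documented on the `group`
# property above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    a = CheckBox(group='choices')
    b = CheckBox(group='choices')
    a._toggle_active()                    # a becomes active
    b._toggle_active()                    # b becomes active, a is released
    print('a=%s b=%s' % (a.active, b.active))  # expected: a=False b=True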
|
mit
|
pfnet/chainer
|
chainer/training/extensions/polynomial_shift.py
|
10
|
3354
|
from __future__ import division
import numpy
from chainer.training import extension
class PolynomialShift(extension.Extension):
"""Trainer extension to polynomially shift an optimizer attribute.
This extension polynomially decreases the specified attribute of the
optimizer. The typical use case is a polynomial decay of the
learning rate at each iteration.
    For example, suppose that this extension is invoked at every iteration.
    Then this extension will set the corresponding attribute to
    ``init_value * (1 - i / max_iter) ^ rate`` at the ``i``-th iteration, where
    ``max_iter`` is the total number of iterations to run.
This extension is also called before the training loop starts by default.
Args:
attr (str): Name of the attribute to shift.
rate (float): Exponent of polynomial shift.
        max_count (int): Number of times this extension is invoked.
init (float): Initial value of the attribute. If it is ``None``, the
extension extracts the attribute at the first call and uses it as
the initial value.
target (float): Target value of the attribute. If the attribute reaches
this value, the shift stops.
optimizer (~chainer.Optimizer): Target optimizer to adjust the
attribute. If it is ``None``, the main optimizer of the updater is
used.
"""
invoke_before_training = True
def __init__(self, attr, rate, max_count, init=None, target=None,
optimizer=None):
self._attr = attr
self._rate = rate
self._init = init
self._target = target
self._optimizer = optimizer
self._t = 0
self._max_count = max_count
self._last_value = None
def initialize(self, trainer):
optimizer = self._get_optimizer(trainer)
# ensure that _init is set
if self._init is None:
self._init = getattr(optimizer, self._attr)
if self._last_value is not None: # resuming from a snapshot
self._update_value(optimizer, self._last_value)
else:
self._update_value(optimizer, self._init)
def __call__(self, trainer):
self._t += 1
optimizer = self._get_optimizer(trainer)
decay = max(1 - self._t / self._max_count, 0)
value = self._init * decay ** self._rate
if self._target is not None:
if self._rate > 0:
# almost same as value = min(value, self._target), but this
# line supports negative values, too
if self._target / value > 1:
value = self._target
else:
# ditto
if self._target / value < 1:
value = self._target
self._update_value(optimizer, value)
def serialize(self, serializer):
self._t = serializer('_t', self._t)
self._last_value = serializer('_last_value', self._last_value)
if isinstance(self._last_value, numpy.ndarray):
self._last_value = self._last_value.item()
def _get_optimizer(self, trainer):
return self._optimizer or trainer.updater.get_optimizer('main')
def _update_value(self, optimizer, value):
setattr(optimizer, self._attr, value)
self._last_value = value
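# ---------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original module. The
# dummy optimizer/trainer objects below are stand-ins for a real Chainer
# setup; only the attribute name ``lr`` is assumed (as on SGD-like
# optimizers). In a real training loop the extension is attached with
# ``trainer.extend(PolynomialShift('lr', rate, max_iter), trigger=(1, 'iteration'))``.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    class _DummyOptimizer(object):
        lr = 0.1

    dummy_optimizer = _DummyOptimizer()

    class _DummyUpdater(object):
        def get_optimizer(self, name):
            return dummy_optimizer

    class _DummyTrainer(object):
        updater = _DummyUpdater()

    shift = PolynomialShift('lr', rate=0.9, max_count=10)
    trainer = _DummyTrainer()
    shift.initialize(trainer)    # picks up lr = 0.1 as the initial value
    for _ in range(10):
        shift(trainer)           # lr <- 0.1 * (1 - t / 10) ** 0.9
    print(dummy_optimizer.lr)    # 0.0 after the final call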
|
mit
|
fbsder/zephyr
|
doc/conf.py
|
1
|
12262
|
# -*- coding: utf-8 -*-
#
# Zephyr documentation build configuration file, created by
# sphinx-quickstart on Fri May 8 11:43:01 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc', 'breathe', 'sphinx.ext.todo',
'sphinx.ext.extlinks'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Zephyr Project'
copyright = u'2015-2017 Zephyr Project members and individual contributors.'
author = u'many'
# The following code tries to extract the information by reading the Makefile
# when Sphinx is run directly (e.g. by Read the Docs).
try:
makefile_version_major = None
makefile_version_minor = None
makefile_patchlevel = None
for line in open('../Makefile'):
key, val = [x.strip() for x in line.split('=', 2)]
if key == 'VERSION_MAJOR':
makefile_version_major = val
if key == 'VERSION_MINOR':
makefile_version_minor = val
elif key == 'PATCHLEVEL':
makefile_patchlevel = val
if makefile_version_major and makefile_version_minor and makefile_patchlevel:
break
except:
pass
finally:
if makefile_version_major and makefile_version_minor and makefile_patchlevel:
version = release = makefile_version_major + '.' + makefile_version_minor + '.' + makefile_patchlevel
else:
sys.stderr.write('Warning: Could not extract kernel version\n')
version = release = "unknown version"
version = release = os.getenv('KERNELVERSION', '0.1.0')
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
rst_epilog = """
.. |codename| replace:: Zephyr Kernel
.. |project| replace:: Zephyr Project
.. |copy| unicode:: U+000A9 .. COPYRIGHT SIGN
:ltrim:
.. |trade| unicode:: U+02122 .. TRADEMARK SIGN
:ltrim:
.. |reg| unicode:: U+000AE .. REGISTERED TRADEMARK SIGN
:ltrim:
.. |deg| unicode:: U+000B0 .. DEGREE SIGN
:ltrim:
.. |plusminus| unicode:: U+000B1 .. PLUS-MINUS SIGN
:rtrim:
.. |micro| unicode:: U+000B5 .. MICRO SIGN
:rtrim:
"""
# -- Options for HTML output ----------------------------------------------
try:
import sphinx_rtd_theme
except ImportError:
html_theme = 'zephyr'
html_theme_path = ['./themes']
else:
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
if tags.has('daily') or tags.has('release'):
html_theme = 'zephyr-docs-theme'
html_theme_path = ['./themes']
if tags.has('release'):
docs_title = 'Docs / %s' %(version)
else:
docs_title = 'Docs'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = "Zephyr Project Documentation"
# This value determines the text for the permalink; it defaults to "¶".
# Set it to None or the empty string to disable permalinks.
#html_add_permalinks = ""
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants =
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_domain_indices = False
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
html_split_index = True
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink =
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
html_show_copyright = tags.has('development')
# If true, license is shown in the HTML footer. Default is True.
html_show_license = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
sourcelink_suffix = '.txt'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'zephyrdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'zephyr.tex', u'Zephyr Project Documentation',
u'many', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'zephyr', u'Zephyr Project Documentation',
author, 'Zephyr', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
breathe_projects = {
"Zephyr": "doxygen/xml",
"doc-examples": "doxygen/xml"
}
breathe_default_project = "Zephyr"
# Qualifiers to a function are causing Sphinx/Breathe to warn about
# Error when parsing function declaration and more. This is a list
# of strings that the parser additionally should accept as
# attributes.
cpp_id_attributes = ['__syscall', '__syscall_inline', '__deprecated',
'__may_alias', '__used', '__unused', '__weak',
'__DEPRECATED_MACRO', 'FUNC_NORETURN' ]
# docs_title is used in the breadcrumb title in the zephyr docs theme
html_context = {
'show_license': html_show_license,
'docs_title': docs_title,
}
extlinks = {'jira': ('https://jira.zephyrproject.org/browse/%s', ''),
'github': ('https://github.com/zephyrproject-rtos/zephyr/issues/%s', '')
}
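# Editor's note (hedged example): with the extlinks roles defined above, reST
# sources can write e.g. :jira:`ZEP-1234` or :github:`12345`, which Sphinx
# expands to the corresponding issue URLs.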
# some configuration for linkcheck builder
# noticed that we're getting false-positive link errors on JIRA, I suspect
# because it's taking too long for the server to respond so bump up the
# timeout (default=5) and turn off anchor checks (so only a HEAD request is
# done - much faster) Leave the ignore commented in case we want to remove
# jira link checks later...
linkcheck_timeout = 30
linkcheck_workers = 10
# linkcheck_ignore = [r'https://jira\.zephyrproject\.org/']
linkcheck_anchors = False
def setup(app):
app.add_stylesheet("zephyr-custom.css")
|
apache-2.0
|
melodous/designate
|
designate/api/v2/controllers/extensions/tenants.py
|
1
|
1568
|
# COPYRIGHT 2014 Rackspace
#
# Author: Betsy Luzader <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pecan
from designate.openstack.common import log as logging
from designate.api.v2.controllers import rest
from designate.api.v2.views.extensions import reports as reports_view
LOG = logging.getLogger(__name__)
class TenantsController(rest.RestController):
_view = reports_view.TenantsView()
@pecan.expose(template='json:', content_type='application/json')
def get_all(self):
request = pecan.request
context = pecan.request.environ['context']
tenants = self.central_api.find_tenants(context)
return self._view.list(context, request, tenants)
@pecan.expose(template='json:', content_type='application/json')
def get_one(self, tenant_id):
"""Get Tenant"""
request = pecan.request
context = request.environ['context']
tenant = self.central_api.get_tenant(context, tenant_id)
return self._view.show_detail(context, request, tenant)
|
apache-2.0
|
anilmuthineni/tensorflow
|
tensorflow/contrib/layers/python/layers/optimizers.py
|
20
|
18031
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Optimizer ops for use in layers and tf.learn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import framework as contrib_framework
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.ops import variables as vars_
from tensorflow.python.summary import summary
from tensorflow.python.training import moving_averages
from tensorflow.python.training import optimizer as optimizer_
from tensorflow.python.training import training as train
OPTIMIZER_CLS_NAMES = {
"Adagrad": train.AdagradOptimizer,
"Adam": train.AdamOptimizer,
"Ftrl": train.FtrlOptimizer,
"Momentum": train.MomentumOptimizer,
"RMSProp": train.RMSPropOptimizer,
"SGD": train.GradientDescentOptimizer,
}
OPTIMIZER_SUMMARIES = [
"learning_rate",
"loss",
"gradients",
"gradient_norm",
]
def optimize_loss(loss,
global_step,
learning_rate,
optimizer,
gradient_noise_scale=None,
gradient_multipliers=None,
clip_gradients=None,
learning_rate_decay_fn=None,
update_ops=None,
variables=None,
name=None,
summaries=None,
colocate_gradients_with_ops=False):
"""Given loss and parameters for optimizer, returns a training op.
  Various ways of passing optimizers include:
- string, name of the optimizer like 'SGD', 'Adam', see OPTIMIZER_CLS_NAMES
for full list. E.g. `optimize_loss(..., optimizer='Adam')`.
- function, takes learning rate `Tensor` as argument and must return
`Optimizer` instance. E.g. `optimize_loss(...,
optimizer=lambda lr: tf.train.MomentumOptimizer(lr, momentum=0.5))`.
Alternatively, if `learning_rate` is `None`, the function takes no
arguments. E.g. `optimize_loss(..., learning_rate=None,
optimizer=lambda: tf.train.MomentumOptimizer(0.5, momentum=0.5))`.
- class, subclass of `Optimizer` that takes only one required argument -
learning rate, such as AdamOptimizer, AdagradOptimizer.
E.g. `optimize_loss(..., optimizer=tf.train.AdagradOptimizer)`.
- object, instance of subclass of `Optimizer`.
    E.g., `optimize_loss(..., optimizer=tf.train.AdagradOptimizer(0.5))`.
Args:
loss: Scalar `Tensor`.
global_step: Scalar int `Tensor`, step counter for each update. If not
supplied, it will be fetched from the default graph (see
`tf.contrib.framework.get_global_step` for details). If it's
not been created, no step will be incremented with each weight
update. `learning_rate_decay_fn` requires `global_step`.
learning_rate: float or `Tensor`, magnitude of update per each training
step. Can be `None`.
optimizer: string, class or optimizer instance, used as trainer.
string should be name of optimizer, like 'SGD',
'Adam', 'Adagrad'. Full list in OPTIMIZER_CLS_NAMES constant.
class should be sub-class of `tf.Optimizer` that implements
`compute_gradients` and `apply_gradients` functions.
optimizer instance should be instantiation of `tf.Optimizer`
sub-class and have `compute_gradients` and `apply_gradients`
functions.
gradient_noise_scale: float or None, adds 0-mean normal noise scaled by this
value.
gradient_multipliers: dict of variables or variable names to floats.
If present, gradients for specified
variables will be multiplied by given constant.
    clip_gradients: float, callable or `None`. If a float is provided, global
      clipping is applied to prevent the norm of the gradients from exceeding
      this value. Alternatively, a callable can be provided, e.g. adaptive_clipping.
This callable takes a `list` of `(gradients, variables)` `tuple`s and
returns the same thing with the gradients modified.
learning_rate_decay_fn: function, takes `learning_rate` and `global_step`
`Tensor`s, returns `Tensor`.
Can be used to implement any learning rate decay
functions.
For example: `tf.train.exponential_decay`.
Ignored if `learning_rate` is not supplied.
update_ops: list of update `Operation`s to execute at each step. If `None`,
uses elements of UPDATE_OPS collection. The order of execution
between `update_ops` and `loss` is non-deterministic.
variables: list of variables to optimize or
`None` to use all trainable variables.
name: The name for this operation is used to scope operations and summaries.
summaries: List of internal quantities to visualize on tensorboard. If not
set only the loss and the learning rate will be reported. The
complete list is in OPTIMIZER_SUMMARIES.
colocate_gradients_with_ops: If True, try colocating gradients with the
corresponding op.
Returns:
Training op.
Raises:
ValueError: if:
* `loss` is an invalid type or shape.
* `global_step` is an invalid type or shape.
* `learning_rate` is an invalid type or value.
* `optimizer` is wrong type.
* `clip_gradients` is not float or callable.
* `learning_rate` and `learning_rate_decay_fn` are supplied, but no
`global_step` is available.
"""
loss = ops.convert_to_tensor(loss)
contrib_framework.assert_scalar(loss)
if global_step is None:
global_step = contrib_framework.get_global_step()
else:
contrib_framework.assert_global_step(global_step)
with vs.variable_scope(name, "OptimizeLoss", [loss, global_step]):
# Update ops take UPDATE_OPS collection if not provided.
if update_ops is None:
update_ops = set(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
    # Make sure update ops are run before computing loss.
if update_ops:
loss = control_flow_ops.with_dependencies(list(update_ops), loss)
# Learning rate variable, with possible decay.
lr = None
if learning_rate is not None:
if (isinstance(learning_rate, ops.Tensor) and
learning_rate.get_shape().ndims == 0):
lr = learning_rate
elif isinstance(learning_rate, float):
if learning_rate < 0.0:
raise ValueError("Invalid learning_rate %s.", learning_rate)
lr = vs.get_variable(
"learning_rate", [],
trainable=False,
initializer=init_ops.constant_initializer(learning_rate))
else:
raise ValueError("Learning rate should be 0d Tensor or float. "
"Got %s of type %s" % (str(learning_rate),
str(type(learning_rate))))
if summaries is None:
summaries = ["loss", "learning_rate"]
else:
for summ in summaries:
if summ not in OPTIMIZER_SUMMARIES:
raise ValueError("Summaries should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_SUMMARIES), summ))
if learning_rate is not None and learning_rate_decay_fn is not None:
if global_step is None:
raise ValueError("global_step is required for learning_rate_decay_fn.")
lr = learning_rate_decay_fn(lr, global_step)
if "learning_rate" in summaries:
summary.scalar("learning_rate", lr)
# Create optimizer, given specified parameters.
if isinstance(optimizer, six.string_types):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is string (%s)." % optimizer)
if optimizer not in OPTIMIZER_CLS_NAMES:
raise ValueError(
"Optimizer name should be one of [%s], you provided %s." %
(", ".join(OPTIMIZER_CLS_NAMES), optimizer))
opt = OPTIMIZER_CLS_NAMES[optimizer](learning_rate=lr)
elif (isinstance(optimizer, type) and
issubclass(optimizer, optimizer_.Optimizer)):
if lr is None:
raise ValueError("Learning rate is None, but should be specified if "
"optimizer is class (%s)." % optimizer)
opt = optimizer(learning_rate=lr)
elif isinstance(optimizer, optimizer_.Optimizer):
opt = optimizer
elif callable(optimizer):
if learning_rate is not None:
opt = optimizer(lr)
else:
opt = optimizer()
if not isinstance(opt, optimizer_.Optimizer):
raise ValueError("Unrecognized optimizer: function should return "
"subclass of Optimizer. Got %s." % str(opt))
else:
raise ValueError("Unrecognized optimizer: should be string, "
"subclass of Optimizer, instance of "
"subclass of Optimizer or function with one argument. "
"Got %s." % str(optimizer))
# All trainable variables, if specific variables are not specified.
if variables is None:
variables = vars_.trainable_variables()
# Compute gradients.
gradients = opt.compute_gradients(
loss,
variables,
colocate_gradients_with_ops=colocate_gradients_with_ops)
# Optionally add gradient noise.
if gradient_noise_scale is not None:
gradients = _add_scaled_noise_to_gradients(gradients,
gradient_noise_scale)
# Multiply some gradients.
if gradient_multipliers is not None:
gradients = _multiply_gradients(gradients, gradient_multipliers)
if "gradient_norm" in summaries:
summary.scalar("global_norm/gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Optionally clip gradients by global norm.
if isinstance(clip_gradients, float):
gradients = _clip_gradients_by_norm(gradients, clip_gradients)
elif callable(clip_gradients):
gradients = clip_gradients(gradients)
elif clip_gradients is not None:
raise ValueError("Unknown type %s for clip_gradients" %
type(clip_gradients))
# Add scalar summary for loss.
if "loss" in summaries:
summary.scalar("loss", loss)
# Add histograms for variables, gradients and gradient norms.
for gradient, variable in gradients:
if isinstance(gradient, ops.IndexedSlices):
grad_values = gradient.values
else:
grad_values = gradient
if grad_values is not None:
var_name = variable.name.replace(":", "_")
if "gradients" in summaries:
summary.histogram("gradients/%s" % var_name, grad_values)
if "gradient_norm" in summaries:
summary.scalar("gradient_norm/%s" % var_name,
clip_ops.global_norm([grad_values]))
if clip_gradients is not None and "gradient_norm" in summaries:
summary.scalar("global_norm/clipped_gradient_norm",
clip_ops.global_norm(list(zip(*gradients))[0]))
# Create gradient updates.
grad_updates = opt.apply_gradients(
gradients, global_step=global_step, name="train")
# Ensure the train_tensor computes grad_updates.
train_tensor = control_flow_ops.with_dependencies([grad_updates], loss)
return train_tensor
def _clip_gradients_by_norm(grads_and_vars, clip_gradients):
"""Clips gradients by global norm."""
gradients, variables = zip(*grads_and_vars)
clipped_gradients, _ = clip_ops.clip_by_global_norm(gradients, clip_gradients)
return list(zip(clipped_gradients, variables))
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
"""Find max_norm given norm and previous average."""
with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
log_norm = math_ops.log(norm + epsilon)
def moving_average(name, value, decay):
moving_average_variable = vs.get_variable(
name,
shape=value.get_shape(),
dtype=value.dtype,
initializer=init_ops.zeros_initializer(),
trainable=False)
return moving_averages.assign_moving_average(
moving_average_variable, value, decay, zero_debias=False)
# quicker adaptation at the beginning
if global_step is not None:
n = math_ops.to_float(global_step)
decay = math_ops.minimum(decay, n / (n + 1.))
# update averages
mean = moving_average("mean", log_norm, decay)
sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)
variance = sq_mean - math_ops.square(mean)
std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
max_norms = math_ops.exp(mean + std_factor * std)
return max_norms, mean
def adaptive_clipping_fn(std_factor=2.,
decay=0.95,
static_max_norm=None,
global_step=None,
report_summary=False,
epsilon=1e-8,
name=None):
"""Adapt the clipping value using statistics on the norms.
  Implement adaptive gradient clipping as presented in section 3.2.1 of
  https://arxiv.org/abs/1412.1602.
  Keeps a moving average of the mean and std of the log(norm) of the gradient.
  If the norm exceeds `exp(mean + std_factor*std)`, all gradients are rescaled
such that the global norm becomes `exp(mean)`.
Args:
    std_factor: Python scalar (or tensor).
`max_norm = exp(mean + std_factor*std)`
decay: The smoothing factor of the moving averages.
static_max_norm: If provided, will threshold the norm to this value as an
extra safety.
global_step: Optional global_step. If provided, `decay = decay*n/(n+1)`.
This provides a quicker adaptation of the mean for the first steps.
report_summary: If `True`, will add histogram summaries of the `max_norm`.
epsilon: Small value chosen to avoid zero variance.
name: The name for this operation is used to scope operations and summaries.
Returns:
A function for applying gradient clipping.
"""
def gradient_clipping(grads_and_vars):
"""Internal function for adaptive clipping."""
grads, variables = zip(*grads_and_vars)
norm = clip_ops.global_norm(grads)
max_norm, log_mean = _adaptive_max_norm(norm, std_factor, decay,
global_step, epsilon, name)
# reports the max gradient norm for debugging
if report_summary:
summary.scalar("global_norm/adaptive_max_gradient_norm", max_norm)
# factor will be 1. if norm is smaller than max_norm
factor = array_ops.where(norm < max_norm,
array_ops.ones_like(norm),
math_ops.exp(log_mean) / norm)
if static_max_norm is not None:
factor = math_ops.minimum(static_max_norm / norm, factor)
# apply factor
clipped_grads = []
for grad in grads:
if grad is None:
clipped_grads.append(None)
elif isinstance(grad, ops.IndexedSlices):
clipped_grads.append(
ops.IndexedSlices(grad.values * factor, grad.indices,
grad.dense_shape))
else:
clipped_grads.append(grad * factor)
return list(zip(clipped_grads, variables))
return gradient_clipping
def _add_scaled_noise_to_gradients(grads_and_vars, gradient_noise_scale):
"""Adds scaled noise from a 0-mean normal distribution to gradients."""
gradients, variables = zip(*grads_and_vars)
noisy_gradients = []
for gradient in gradients:
if gradient is None:
noisy_gradients.append(None)
continue
if isinstance(gradient, ops.IndexedSlices):
gradient_shape = gradient.dense_shape
else:
gradient_shape = gradient.get_shape()
noise = random_ops.truncated_normal(gradient_shape) * gradient_noise_scale
noisy_gradients.append(gradient + noise)
return list(zip(noisy_gradients, variables))
def _multiply_gradients(grads_and_vars, gradient_multipliers):
"""Multiply specified gradients."""
multiplied_grads_and_vars = []
for grad, var in grads_and_vars:
if (grad is not None and
(var in gradient_multipliers or var.name in gradient_multipliers)):
key = var if var in gradient_multipliers else var.name
multiplier = constant_op.constant(
gradient_multipliers[key], dtype=dtypes.float32)
if isinstance(grad, ops.IndexedSlices):
grad_values = grad.values * multiplier
grad = ops.IndexedSlices(grad_values, grad.indices, grad.dense_shape)
else:
grad *= multiplier
multiplied_grads_and_vars.append((grad, var))
return multiplied_grads_and_vars
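# -----------------------------------------------------------------------------
# Editor's note: hedged usage sketch, not part of the original module. `loss`
# and `global_step` are placeholders for tensors defined by the caller's model;
# the call below simply illustrates combining `optimize_loss` with
# `adaptive_clipping_fn` as described in the docstrings above.
#
#     train_op = optimize_loss(
#         loss,
#         global_step,
#         learning_rate=0.1,
#         optimizer='SGD',
#         clip_gradients=adaptive_clipping_fn(std_factor=2., decay=0.95),
#         summaries=['loss', 'learning_rate', 'gradient_norm'])
# -----------------------------------------------------------------------------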
|
apache-2.0
|
tedi3231/openerp
|
openerp/addons/l10n_be_coda/l10n_be_coda.py
|
63
|
3843
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Noviat nv/sa (www.noviat.be). All rights reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_bank_statement(osv.osv):
_inherit = 'account.bank.statement'
_columns = {
'coda_note': fields.text('CODA Notes'),
}
class account_bank_statement_line(osv.osv):
_inherit = 'account.bank.statement.line'
_columns = {
'coda_account_number': fields.char('Account Number', help="The Counter Party Account Number")
}
def create(self, cr, uid, data, context=None):
"""
This function creates a Bank Account Number if, for a bank statement line,
the partner_id field and the coda_account_number field are set,
and the account number does not exist in the database
"""
if 'partner_id' in data and data['partner_id'] and 'coda_account_number' in data and data['coda_account_number']:
acc_number_ids = self.pool.get('res.partner.bank').search(cr, uid, [('acc_number', '=', data['coda_account_number'])])
if len(acc_number_ids) == 0:
try:
type_model, type_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'bank_normal')
type_id = self.pool.get('res.partner.bank.type').browse(cr, uid, type_id, context=context)
self.pool.get('res.partner.bank').create(cr, uid, {'acc_number': data['coda_account_number'], 'partner_id': data['partner_id'], 'state': type_id.code}, context=context)
except ValueError:
pass
return super(account_bank_statement_line, self).create(cr, uid, data, context=context)
    def write(self, cr, uid, ids, vals, context=None):
        """
        Same as create function above, but for write function
        """
        super(account_bank_statement_line, self).write(cr, uid, ids, vals, context)
if 'partner_id' in vals:
for line in self.pool.get('account.bank.statement.line').browse(cr, uid, ids, context=context):
if line.coda_account_number:
acc_number_ids = self.pool.get('res.partner.bank').search(cr, uid, [('acc_number', '=', line.coda_account_number)])
if len(acc_number_ids) == 0:
try:
type_model, type_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'bank_normal')
type_id = self.pool.get('res.partner.bank.type').browse(cr, uid, type_id, context=context)
self.pool.get('res.partner.bank').create(cr, uid, {'acc_number': line.coda_account_number, 'partner_id': vals['partner_id'], 'state': type_id.code}, context=context)
except ValueError:
pass
return True
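# Editor's note (hedged sketch, not part of the original module): creating a
# statement line with both keys set, e.g.
#     self.pool.get('account.bank.statement.line').create(cr, uid, {
#         'partner_id': partner_id,
#         'coda_account_number': 'BE68 5390 0754 7034',
#         # ... remaining statement line fields ...
#     }, context=context)
# also creates the matching res.partner.bank record when that account number
# does not exist yet, as implemented in create() above.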
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
agpl-3.0
|
bvcms/bvcms
|
CmsWeb/Lib/logging/__init__.py
|
2
|
61272
|
# Copyright 2001-2014 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
"""
Logging package for Python. Based on PEP 282 and comments thereto in
comp.lang.python.
Copyright (C) 2001-2014 Vinay Sajip. All Rights Reserved.
To use, simply 'import logging' and log away!
"""
import sys, os, time, cStringIO, traceback, warnings, weakref, collections
__all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR',
'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO',
'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler',
'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig',
'captureWarnings', 'critical', 'debug', 'disable', 'error',
'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass',
'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning']
try:
import codecs
except ImportError:
codecs = None
try:
import thread
import threading
except ImportError:
thread = None
__author__ = "Vinay Sajip <[email protected]>"
__status__ = "production"
# Note: the attributes below are no longer maintained.
__version__ = "0.5.1.2"
__date__ = "07 February 2010"
#---------------------------------------------------------------------------
# Miscellaneous module data
#---------------------------------------------------------------------------
try:
unicode
_unicode = True
except NameError:
_unicode = False
# next bit filched from 1.5.2's inspect.py
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3)
# done filching
#
# _srcfile is used when walking the stack to check when we've got the first
# caller stack frame.
#
_srcfile = os.path.normcase(currentframe.__code__.co_filename)
# _srcfile is only used in conjunction with sys._getframe().
# To provide compatibility with older versions of Python, set _srcfile
# to None if _getframe() is not available; this value will prevent
# findCaller() from being called.
#if not hasattr(sys, "_getframe"):
# _srcfile = None
#
#_startTime is used as the base when calculating the relative time of events
#
_startTime = time.time()
#
#raiseExceptions is used to see if exceptions during handling should be
#propagated
#
raiseExceptions = 1
#
# If you don't want threading information in the log, set this to zero
#
logThreads = 1
#
# If you don't want multiprocessing information in the log, set this to zero
#
logMultiprocessing = 1
#
# If you don't want process information in the log, set this to zero
#
logProcesses = 1
#---------------------------------------------------------------------------
# Level related stuff
#---------------------------------------------------------------------------
#
# Default levels and level names, these can be replaced with any positive set
# of values having corresponding names. There is a pseudo-level, NOTSET, which
# is only really there as a lower limit for user-defined levels. Handlers and
# loggers are initialized with NOTSET so that they will log all messages, even
# at user-defined levels.
#
CRITICAL = 50
FATAL = CRITICAL
ERROR = 40
WARNING = 30
WARN = WARNING
INFO = 20
DEBUG = 10
NOTSET = 0
_levelNames = {
CRITICAL : 'CRITICAL',
ERROR : 'ERROR',
WARNING : 'WARNING',
INFO : 'INFO',
DEBUG : 'DEBUG',
NOTSET : 'NOTSET',
'CRITICAL' : CRITICAL,
'ERROR' : ERROR,
'WARN' : WARNING,
'WARNING' : WARNING,
'INFO' : INFO,
'DEBUG' : DEBUG,
'NOTSET' : NOTSET,
}
def getLevelName(level):
"""
Return the textual representation of logging level 'level'.
If the level is one of the predefined levels (CRITICAL, ERROR, WARNING,
INFO, DEBUG) then you get the corresponding string. If you have
associated levels with names using addLevelName then the name you have
associated with 'level' is returned.
If a numeric value corresponding to one of the defined levels is passed
in, the corresponding string representation is returned.
Otherwise, the string "Level %s" % level is returned.
"""
return _levelNames.get(level, ("Level %s" % level))
def addLevelName(level, levelName):
"""
Associate 'levelName' with 'level'.
This is used when converting levels to text during message formatting.
"""
_acquireLock()
try: #unlikely to cause an exception, but you never know...
_levelNames[level] = levelName
_levelNames[levelName] = level
finally:
_releaseLock()
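# Editor's note (hedged example, not part of the stdlib source): a custom
# level registered with addLevelName() resolves in both directions, e.g.
#     TRACE = 5
#     addLevelName(TRACE, "TRACE")
#     getLevelName(TRACE)    # -> "TRACE"
#     getLevelName("TRACE")  # -> 5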
def _checkLevel(level):
if isinstance(level, (int, long)):
rv = level
elif str(level) == level:
if level not in _levelNames:
raise ValueError("Unknown level: %r" % level)
rv = _levelNames[level]
else:
raise TypeError("Level not an integer or a valid string: %r" % level)
return rv
#---------------------------------------------------------------------------
# Thread-related stuff
#---------------------------------------------------------------------------
#
#_lock is used to serialize access to shared data structures in this module.
#This needs to be an RLock because fileConfig() creates and configures
#Handlers, and so might arbitrary user threads. Since Handler code updates the
#shared dictionary _handlers, it needs to acquire the lock. But if configuring,
#the lock would already have been acquired - so we need an RLock.
#The same argument applies to Loggers and Manager.loggerDict.
#
if thread:
_lock = threading.RLock()
else:
_lock = None
def _acquireLock():
"""
Acquire the module-level lock for serializing access to shared data.
This should be released with _releaseLock().
"""
if _lock:
_lock.acquire()
def _releaseLock():
"""
Release the module-level lock acquired by calling _acquireLock().
"""
if _lock:
_lock.release()
#---------------------------------------------------------------------------
# The logging record
#---------------------------------------------------------------------------
class LogRecord(object):
"""
A LogRecord instance represents an event being logged.
LogRecord instances are created every time something is logged. They
contain all the information pertinent to the event being logged. The
main information passed in is in msg and args, which are combined
using str(msg) % args to create the message field of the record. The
record also includes information such as when the record was created,
the source line where the logging call was made, and any exception
information to be logged.
"""
def __init__(self, name, level, pathname, lineno,
msg, args, exc_info, func=None):
"""
Initialize a logging record with interesting information.
"""
ct = time.time()
self.name = name
self.msg = msg
#
# The following statement allows passing of a dictionary as a sole
# argument, so that you can do something like
# logging.debug("a %(a)d b %(b)s", {'a':1, 'b':2})
# Suggested by Stefan Behnel.
# Note that without the test for args[0], we get a problem because
# during formatting, we test to see if the arg is present using
# 'if self.args:'. If the event being logged is e.g. 'Value is %d'
# and if the passed arg fails 'if self.args:' then no formatting
# is done. For example, logger.warn('Value is %d', 0) would log
# 'Value is %d' instead of 'Value is 0'.
# For the use case of passing a dictionary, this should not be a
# problem.
# Issue #21172: a request was made to relax the isinstance check
# to hasattr(args[0], '__getitem__'). However, the docs on string
# formatting still seem to suggest a mapping object is required.
# Thus, while not removing the isinstance check, it does now look
# for collections.Mapping rather than, as before, dict.
if (args and len(args) == 1 and isinstance(args[0], collections.Mapping)
and args[0]):
args = args[0]
self.args = args
self.levelname = getLevelName(level)
self.levelno = level
self.pathname = pathname
try:
self.filename = os.path.basename(pathname)
self.module = os.path.splitext(self.filename)[0]
except (TypeError, ValueError, AttributeError):
self.filename = pathname
self.module = "Unknown module"
self.exc_info = exc_info
self.exc_text = None # used to cache the traceback text
self.lineno = lineno
self.funcName = func
self.created = ct
self.msecs = (ct - long(ct)) * 1000
self.relativeCreated = (self.created - _startTime) * 1000
if logThreads and thread:
self.thread = thread.get_ident()
self.threadName = threading.current_thread().name
else:
self.thread = None
self.threadName = None
if not logMultiprocessing:
self.processName = None
else:
self.processName = 'MainProcess'
mp = sys.modules.get('multiprocessing')
if mp is not None:
# Errors may occur if multiprocessing has not finished loading
# yet - e.g. if a custom import hook causes third-party code
# to run when multiprocessing calls import. See issue 8200
# for an example
try:
self.processName = mp.current_process().name
except StandardError:
pass
if logProcesses and hasattr(os, 'getpid'):
self.process = os.getpid()
else:
self.process = None
def __str__(self):
return '<LogRecord: %s, %s, %s, %s, "%s">'%(self.name, self.levelno,
self.pathname, self.lineno, self.msg)
def getMessage(self):
"""
Return the message for this LogRecord.
Return the message for this LogRecord after merging any user-supplied
arguments with the message.
"""
if not _unicode: #if no unicode support...
msg = str(self.msg)
else:
msg = self.msg
if not isinstance(msg, basestring):
try:
msg = str(self.msg)
except UnicodeError:
msg = self.msg #Defer encoding till later
if self.args:
msg = msg % self.args
return msg
def makeLogRecord(dict):
"""
    Make a LogRecord whose attributes are defined by the specified dictionary.
This function is useful for converting a logging event received over
a socket connection (which is sent as a dictionary) into a LogRecord
instance.
"""
rv = LogRecord(None, None, "", 0, "", (), None, None)
rv.__dict__.update(dict)
return rv
#---------------------------------------------------------------------------
# Formatter classes and functions
#---------------------------------------------------------------------------
class Formatter(object):
"""
Formatter instances are used to convert a LogRecord to text.
Formatters need to know how a LogRecord is constructed. They are
responsible for converting a LogRecord to (usually) a string which can
be interpreted by either a human or an external system. The base Formatter
allows a formatting string to be specified. If none is supplied, the
    default value of "%(message)s" is used.
The Formatter can be initialized with a format string which makes use of
knowledge of the LogRecord attributes - e.g. the default value mentioned
above makes use of the fact that the user's message and arguments are pre-
formatted into a LogRecord's message attribute. Currently, the useful
attributes in a LogRecord are described by:
%(name)s Name of the logger (logging channel)
%(levelno)s Numeric logging level for the message (DEBUG, INFO,
WARNING, ERROR, CRITICAL)
%(levelname)s Text logging level for the message ("DEBUG", "INFO",
"WARNING", "ERROR", "CRITICAL")
%(pathname)s Full pathname of the source file where the logging
call was issued (if available)
%(filename)s Filename portion of pathname
%(module)s Module (name portion of filename)
%(lineno)d Source line number where the logging call was issued
(if available)
%(funcName)s Function name
%(created)f Time when the LogRecord was created (time.time()
return value)
%(asctime)s Textual time when the LogRecord was created
%(msecs)d Millisecond portion of the creation time
%(relativeCreated)d Time in milliseconds when the LogRecord was created,
relative to the time the logging module was loaded
(typically at application startup time)
%(thread)d Thread ID (if available)
%(threadName)s Thread name (if available)
%(process)d Process ID (if available)
%(message)s The result of record.getMessage(), computed just as
the record is emitted
"""
converter = time.localtime
def __init__(self, fmt=None, datefmt=None):
"""
Initialize the formatter with specified format strings.
Initialize the formatter either with the specified format string, or a
default as described above. Allow for specialized date formatting with
the optional datefmt argument (if omitted, you get the ISO8601 format).
"""
if fmt:
self._fmt = fmt
else:
self._fmt = "%(message)s"
self.datefmt = datefmt
def formatTime(self, record, datefmt=None):
"""
Return the creation time of the specified LogRecord as formatted text.
This method should be called from format() by a formatter which
wants to make use of a formatted time. This method can be overridden
in formatters to provide for any specific requirement, but the
basic behaviour is as follows: if datefmt (a string) is specified,
it is used with time.strftime() to format the creation time of the
record. Otherwise, the ISO8601 format is used. The resulting
string is returned. This function uses a user-configurable function
to convert the creation time to a tuple. By default, time.localtime()
is used; to change this for a particular formatter instance, set the
'converter' attribute to a function with the same signature as
time.localtime() or time.gmtime(). To change it for all formatters,
for example if you want all logging times to be shown in GMT,
set the 'converter' attribute in the Formatter class.
"""
ct = self.converter(record.created)
if datefmt:
s = time.strftime(datefmt, ct)
else:
t = time.strftime("%Y-%m-%d %H:%M:%S", ct)
s = "%s,%03d" % (t, record.msecs)
return s
def formatException(self, ei):
"""
Format and return the specified exception information as a string.
This default implementation just uses
traceback.print_exception()
"""
sio = cStringIO.StringIO()
traceback.print_exception(ei[0], ei[1], ei[2], None, sio)
s = sio.getvalue()
sio.close()
if s[-1:] == "\n":
s = s[:-1]
return s
def usesTime(self):
"""
Check if the format uses the creation time of the record.
"""
return self._fmt.find("%(asctime)") >= 0
def format(self, record):
"""
Format the specified record as text.
The record's attribute dictionary is used as the operand to a
string formatting operation which yields the returned string.
Before formatting the dictionary, a couple of preparatory steps
are carried out. The message attribute of the record is computed
using LogRecord.getMessage(). If the formatting string uses the
        time (as determined by a call to usesTime()), formatTime() is
called to format the event time. If there is exception information,
it is formatted using formatException() and appended to the message.
"""
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
try:
s = self._fmt % record.__dict__
except UnicodeDecodeError as e:
# Issue 25664. The logger name may be Unicode. Try again ...
try:
record.name = record.name.decode('utf-8')
s = self._fmt % record.__dict__
except UnicodeDecodeError:
raise e
if record.exc_info:
# Cache the traceback text to avoid converting it multiple times
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s = s + "\n"
try:
s = s + record.exc_text
except UnicodeError:
# Sometimes filenames have non-ASCII chars, which can lead
# to errors when s is Unicode and record.exc_text is str
# See issue 8924.
# We also use replace for when there are multiple
# encodings, e.g. UTF-8 for the filesystem and latin-1
# for a script. See issue 13232.
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
#
# The default formatter to use when no other is specified
#
_defaultFormatter = Formatter()
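# Editor's illustrative sketch (not part of the original module): a Formatter
# built with an explicit format string and date format; the %(...)s names are
# the LogRecord attributes listed in the class docstring above. Defined but
# never invoked at import time.
def _example_formatter():
    fmt = Formatter("%(asctime)s %(levelname)s %(name)s: %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S")
    record = LogRecord("example", WARNING, __file__, 1, "disk usage at %d%%", (93,), None)
    return fmt.format(record)       # e.g. "2024-01-01 12:00:00 WARNING example: disk usage at 93%"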
class BufferingFormatter(object):
"""
A formatter suitable for formatting a number of records.
"""
def __init__(self, linefmt=None):
"""
Optionally specify a formatter which will be used to format each
individual record.
"""
if linefmt:
self.linefmt = linefmt
else:
self.linefmt = _defaultFormatter
def formatHeader(self, records):
"""
Return the header string for the specified records.
"""
return ""
def formatFooter(self, records):
"""
Return the footer string for the specified records.
"""
return ""
def format(self, records):
"""
Format the specified records and return the result as a string.
"""
rv = ""
if len(records) > 0:
rv = rv + self.formatHeader(records)
for record in records:
rv = rv + self.linefmt.format(record)
rv = rv + self.formatFooter(records)
return rv
#---------------------------------------------------------------------------
# Filter classes and functions
#---------------------------------------------------------------------------
class Filter(object):
"""
Filter instances are used to perform arbitrary filtering of LogRecords.
Loggers and Handlers can optionally use Filter instances to filter
records as desired. The base filter class only allows events which are
below a certain point in the logger hierarchy. For example, a filter
initialized with "A.B" will allow events logged by loggers "A.B",
"A.B.C", "A.B.C.D", "A.B.D" etc. but not "A.BB", "B.A.B" etc. If
initialized with the empty string, all events are passed.
"""
def __init__(self, name=''):
"""
Initialize a filter.
Initialize with the name of the logger which, together with its
children, will have its events allowed through the filter. If no
name is specified, allow every event.
"""
self.name = name
self.nlen = len(name)
def filter(self, record):
"""
Determine if the specified record is to be logged.
Is the specified record to be logged? Returns 0 for no, nonzero for
yes. If deemed appropriate, the record may be modified in-place.
"""
if self.nlen == 0:
return 1
elif self.name == record.name:
return 1
elif record.name.find(self.name, 0, self.nlen) != 0:
return 0
return (record.name[self.nlen] == ".")
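# Editor's illustrative sketch (not part of the original module): the base
# Filter passes records from the named logger and its descendants only.
# Defined but never invoked here.
def _example_filter():
    f = Filter("A.B")
    allowed = makeLogRecord({"name": "A.B.C"})
    blocked = makeLogRecord({"name": "A.BB"})
    return f.filter(allowed), f.filter(blocked)     # -> (True, False)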
class Filterer(object):
"""
A base class for loggers and handlers which allows them to share
common code.
"""
def __init__(self):
"""
Initialize the list of filters to be an empty list.
"""
self.filters = []
def addFilter(self, filter):
"""
Add the specified filter to this handler.
"""
if not (filter in self.filters):
self.filters.append(filter)
def removeFilter(self, filter):
"""
Remove the specified filter from this handler.
"""
if filter in self.filters:
self.filters.remove(filter)
def filter(self, record):
"""
Determine if a record is loggable by consulting all the filters.
The default is to allow the record to be logged; any filter can veto
this and the record is then dropped. Returns a zero value if a record
is to be dropped, else non-zero.
"""
rv = 1
for f in self.filters:
if not f.filter(record):
rv = 0
break
return rv
#---------------------------------------------------------------------------
# Handler classes and functions
#---------------------------------------------------------------------------
_handlers = weakref.WeakValueDictionary() #map of handler names to handlers
_handlerList = [] # added to allow handlers to be removed in reverse of order initialized
def _removeHandlerRef(wr):
"""
Remove a handler reference from the internal cleanup list.
"""
# This function can be called during module teardown, when globals are
# set to None. It can also be called from another thread. So we need to
# pre-emptively grab the necessary globals and check if they're None,
# to prevent race conditions and failures during interpreter shutdown.
acquire, release, handlers = _acquireLock, _releaseLock, _handlerList
if acquire and release and handlers:
acquire()
try:
if wr in handlers:
handlers.remove(wr)
finally:
release()
def _addHandlerRef(handler):
"""
Add a handler to the internal cleanup list using a weak reference.
"""
_acquireLock()
try:
_handlerList.append(weakref.ref(handler, _removeHandlerRef))
finally:
_releaseLock()
class Handler(Filterer):
"""
Handler instances dispatch logging events to specific destinations.
The base handler class. Acts as a placeholder which defines the Handler
interface. Handlers can optionally use Formatter instances to format
records as desired. By default, no formatter is specified; in this case,
the 'raw' message as determined by record.message is logged.
"""
def __init__(self, level=NOTSET):
"""
Initializes the instance - basically setting the formatter to None
and the filter list to empty.
"""
Filterer.__init__(self)
self._name = None
self.level = _checkLevel(level)
self.formatter = None
# Add the handler to the global _handlerList (for cleanup on shutdown)
_addHandlerRef(self)
self.createLock()
def get_name(self):
return self._name
def set_name(self, name):
_acquireLock()
try:
if self._name in _handlers:
del _handlers[self._name]
self._name = name
if name:
_handlers[name] = self
finally:
_releaseLock()
name = property(get_name, set_name)
def createLock(self):
"""
Acquire a thread lock for serializing access to the underlying I/O.
"""
if thread:
self.lock = threading.RLock()
else:
self.lock = None
def acquire(self):
"""
Acquire the I/O thread lock.
"""
if self.lock:
self.lock.acquire()
def release(self):
"""
Release the I/O thread lock.
"""
if self.lock:
self.lock.release()
def setLevel(self, level):
"""
Set the logging level of this handler.
"""
self.level = _checkLevel(level)
def format(self, record):
"""
Format the specified record.
If a formatter is set, use it. Otherwise, use the default formatter
for the module.
"""
if self.formatter:
fmt = self.formatter
else:
fmt = _defaultFormatter
return fmt.format(record)
def emit(self, record):
"""
Do whatever it takes to actually log the specified logging record.
This version is intended to be implemented by subclasses and so
raises a NotImplementedError.
"""
raise NotImplementedError('emit must be implemented '
'by Handler subclasses')
def handle(self, record):
"""
Conditionally emit the specified logging record.
Emission depends on filters which may have been added to the handler.
Wrap the actual emission of the record with acquisition/release of
the I/O thread lock. Returns whether the filter passed the record for
emission.
"""
rv = self.filter(record)
if rv:
self.acquire()
try:
self.emit(record)
finally:
self.release()
return rv
def setFormatter(self, fmt):
"""
Set the formatter for this handler.
"""
self.formatter = fmt
def flush(self):
"""
Ensure all logging output has been flushed.
This version does nothing and is intended to be implemented by
subclasses.
"""
pass
def close(self):
"""
Tidy up any resources used by the handler.
This version removes the handler from an internal map of handlers,
_handlers, which is used for handler lookup by name. Subclasses
should ensure that this gets called from overridden close()
methods.
"""
#get the module data lock, as we're updating a shared structure.
_acquireLock()
try: #unlikely to raise an exception, but you never know...
if self._name and self._name in _handlers:
del _handlers[self._name]
finally:
_releaseLock()
def handleError(self, record):
"""
Handle errors which occur during an emit() call.
This method should be called from handlers when an exception is
encountered during an emit() call. If raiseExceptions is false,
exceptions get silently ignored. This is what is mostly wanted
for a logging system - most users will not care about errors in
the logging system, they are more interested in application errors.
You could, however, replace this with a custom handler if you wish.
The record which was being processed is passed in to this method.
"""
if raiseExceptions and sys.stderr: # see issue 13807
ei = sys.exc_info()
try:
traceback.print_exception(ei[0], ei[1], ei[2],
None, sys.stderr)
sys.stderr.write('Logged from file %s, line %s\n' % (
record.filename, record.lineno))
except IOError:
pass # see issue 5971
finally:
del ei
class StreamHandler(Handler):
"""
A handler class which writes logging records, appropriately formatted,
to a stream. Note that this class does not close the stream, as
sys.stdout or sys.stderr may be used.
"""
def __init__(self, stream=None):
"""
Initialize the handler.
If stream is not specified, sys.stderr is used.
"""
Handler.__init__(self)
if stream is None:
stream = sys.stderr
self.stream = stream
def flush(self):
"""
Flushes the stream.
"""
self.acquire()
try:
if self.stream and hasattr(self.stream, "flush"):
self.stream.flush()
finally:
self.release()
def emit(self, record):
"""
Emit a record.
If a formatter is specified, it is used to format the record.
The record is then written to the stream with a trailing newline. If
exception information is present, it is formatted using
traceback.print_exception and appended to the stream. If the stream
has an 'encoding' attribute, it is used to determine how to do the
output to the stream.
"""
try:
msg = self.format(record)
stream = self.stream
fs = "%s\n"
if not _unicode: #if no unicode support...
stream.write(fs % msg)
else:
try:
if (isinstance(msg, unicode) and
getattr(stream, 'encoding', None)):
ufs = u'%s\n'
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
#Printing to terminals sometimes fails. For example,
#with an encoding of 'cp1251', the above write will
#work if written to a stream opened or wrapped by
#the codecs module, but fail when writing to a
#terminal even when the codepage is set to cp1251.
#An extra encoding step seems to be needed.
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record)
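# Editor's illustrative sketch (not part of the original module): a
# StreamHandler writing formatted records to an in-memory buffer rather than
# sys.stderr. Defined but never invoked here.
def _example_stream_handler():
    sio = cStringIO.StringIO()
    h = StreamHandler(sio)
    h.setFormatter(Formatter("%(levelname)s:%(name)s:%(message)s"))
    h.handle(makeLogRecord({"name": "example", "levelname": "INFO",
                            "msg": "hello"}))
    return sio.getvalue()           # -> "INFO:example:hello\n"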
class FileHandler(StreamHandler):
"""
A handler class which writes formatted logging records to disk files.
"""
def __init__(self, filename, mode='a', encoding=None, delay=0):
"""
Open the specified file and use it as the stream for logging.
"""
#keep the absolute path, otherwise derived classes which use this
#may come a cropper when the current directory changes
if codecs is None:
encoding = None
self.baseFilename = os.path.abspath(filename)
self.mode = mode
self.encoding = encoding
self.delay = delay
if delay:
#We don't open the stream, but we still need to call the
#Handler constructor to set level, formatter, lock etc.
Handler.__init__(self)
self.stream = None
else:
StreamHandler.__init__(self, self._open())
def close(self):
"""
Closes the stream.
"""
self.acquire()
try:
try:
if self.stream:
try:
self.flush()
finally:
stream = self.stream
self.stream = None
if hasattr(stream, "close"):
stream.close()
finally:
# Issue #19523: call unconditionally to
# prevent a handler leak when delay is set
StreamHandler.close(self)
finally:
self.release()
def _open(self):
"""
Open the current base file with the (original) mode and encoding.
Return the resulting stream.
"""
if self.encoding is None:
stream = open(self.baseFilename, self.mode)
else:
stream = codecs.open(self.baseFilename, self.mode, self.encoding)
return stream
def emit(self, record):
"""
Emit a record.
If the stream was not opened because 'delay' was specified in the
constructor, open it before calling the superclass's emit.
"""
if self.stream is None:
self.stream = self._open()
StreamHandler.emit(self, record)
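# Editor's illustrative sketch (not part of the original module): with delay=1
# a FileHandler postpones opening its file until the first record is emitted.
# The path argument is hypothetical; nothing calls this helper at import time.
def _example_file_handler(path):
    h = FileHandler(path, mode='a', delay=1)
    assert h.stream is None                 # file not opened yet
    h.emit(makeLogRecord({"name": "example", "msg": "first line"}))
    h.close()                               # flush, close and deregister
    return h.baseFilename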
#---------------------------------------------------------------------------
# Manager classes and functions
#---------------------------------------------------------------------------
class PlaceHolder(object):
"""
PlaceHolder instances are used in the Manager logger hierarchy to take
the place of nodes for which no loggers have been defined. This class is
intended for internal use only and not as part of the public API.
"""
def __init__(self, alogger):
"""
Initialize with the specified logger being a child of this placeholder.
"""
#self.loggers = [alogger]
self.loggerMap = { alogger : None }
def append(self, alogger):
"""
Add the specified logger as a child of this placeholder.
"""
#if alogger not in self.loggers:
if alogger not in self.loggerMap:
#self.loggers.append(alogger)
self.loggerMap[alogger] = None
#
# Determine which class to use when instantiating loggers.
#
_loggerClass = None
def setLoggerClass(klass):
"""
Set the class to be used when instantiating a logger. The class should
define __init__() such that only a name argument is required, and the
__init__() should call Logger.__init__()
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
global _loggerClass
_loggerClass = klass
def getLoggerClass():
"""
Return the class to be used when instantiating a logger.
"""
return _loggerClass
class Manager(object):
"""
There is [under normal circumstances] just one Manager instance, which
holds the hierarchy of loggers.
"""
def __init__(self, rootnode):
"""
Initialize the manager with the root node of the logger hierarchy.
"""
self.root = rootnode
self.disable = 0
self.emittedNoHandlerWarning = 0
self.loggerDict = {}
self.loggerClass = None
def getLogger(self, name):
"""
Get a logger with the specified name (channel name), creating it
if it doesn't yet exist. This name is a dot-separated hierarchical
name, such as "a", "a.b", "a.b.c" or similar.
If a PlaceHolder existed for the specified name [i.e. the logger
didn't exist but a child of it did], replace it with the created
logger and fix up the parent/child references which pointed to the
placeholder to now point to the logger.
"""
rv = None
if not isinstance(name, basestring):
raise TypeError('A logger name must be string or Unicode')
if isinstance(name, unicode):
name = name.encode('utf-8')
_acquireLock()
try:
if name in self.loggerDict:
rv = self.loggerDict[name]
if isinstance(rv, PlaceHolder):
ph = rv
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupChildren(ph, rv)
self._fixupParents(rv)
else:
rv = (self.loggerClass or _loggerClass)(name)
rv.manager = self
self.loggerDict[name] = rv
self._fixupParents(rv)
finally:
_releaseLock()
return rv
def setLoggerClass(self, klass):
"""
Set the class to be used when instantiating a logger with this Manager.
"""
if klass != Logger:
if not issubclass(klass, Logger):
raise TypeError("logger not derived from logging.Logger: "
+ klass.__name__)
self.loggerClass = klass
def _fixupParents(self, alogger):
"""
Ensure that there are either loggers or placeholders all the way
from the specified logger to the root of the logger hierarchy.
"""
name = alogger.name
i = name.rfind(".")
rv = None
while (i > 0) and not rv:
substr = name[:i]
if substr not in self.loggerDict:
self.loggerDict[substr] = PlaceHolder(alogger)
else:
obj = self.loggerDict[substr]
if isinstance(obj, Logger):
rv = obj
else:
assert isinstance(obj, PlaceHolder)
obj.append(alogger)
i = name.rfind(".", 0, i - 1)
if not rv:
rv = self.root
alogger.parent = rv
def _fixupChildren(self, ph, alogger):
"""
Ensure that children of the placeholder ph are connected to the
specified logger.
"""
name = alogger.name
namelen = len(name)
for c in ph.loggerMap.keys():
#The if means ... if not c.parent.name.startswith(nm)
if c.parent.name[:namelen] != name:
alogger.parent = c.parent
c.parent = alogger
#---------------------------------------------------------------------------
# Logger classes and functions
#---------------------------------------------------------------------------
class Logger(Filterer):
"""
Instances of the Logger class represent a single logging channel. A
"logging channel" indicates an area of an application. Exactly how an
"area" is defined is up to the application developer. Since an
application can have any number of areas, logging channels are identified
by a unique string. Application areas can be nested (e.g. an area
of "input processing" might include sub-areas "read CSV files", "read
XLS files" and "read Gnumeric files"). To cater for this natural nesting,
channel names are organized into a namespace hierarchy where levels are
separated by periods, much like the Java or Python package namespace. So
in the instance given above, channel names might be "input" for the upper
level, and "input.csv", "input.xls" and "input.gnu" for the sub-levels.
There is no arbitrary limit to the depth of nesting.
"""
def __init__(self, name, level=NOTSET):
"""
Initialize the logger with a name and an optional level.
"""
Filterer.__init__(self)
self.name = name
self.level = _checkLevel(level)
self.parent = None
self.propagate = 1
self.handlers = []
self.disabled = 0
def setLevel(self, level):
"""
Set the logging level of this logger.
"""
self.level = _checkLevel(level)
def debug(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'DEBUG'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.debug("Houston, we have a %s", "thorny problem", exc_info=1)
"""
if self.isEnabledFor(DEBUG):
self._log(DEBUG, msg, args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'INFO'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.info("Houston, we have a %s", "interesting problem", exc_info=1)
"""
if self.isEnabledFor(INFO):
self._log(INFO, msg, args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'WARNING'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.warning("Houston, we have a %s", "bit of a problem", exc_info=1)
"""
if self.isEnabledFor(WARNING):
self._log(WARNING, msg, args, **kwargs)
warn = warning
def error(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'ERROR'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.error("Houston, we have a %s", "major problem", exc_info=1)
"""
if self.isEnabledFor(ERROR):
self._log(ERROR, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Convenience method for logging an ERROR with exception information.
"""
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Log 'msg % args' with severity 'CRITICAL'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.critical("Houston, we have a %s", "major disaster", exc_info=1)
"""
if self.isEnabledFor(CRITICAL):
self._log(CRITICAL, msg, args, **kwargs)
fatal = critical
def log(self, level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level'.
To pass exception information, use the keyword argument exc_info with
a true value, e.g.
logger.log(level, "We have a %s", "mysterious problem", exc_info=1)
"""
if not isinstance(level, (int, long)):
if raiseExceptions:
raise TypeError("level must be an integer")
else:
return
if self.isEnabledFor(level):
self._log(level, msg, args, **kwargs)
def findCaller(self):
"""
Find the stack frame of the caller so that we can note the source
file name, line number and function name.
"""
f = currentframe()
#On some versions of IronPython, currentframe() returns None if
#IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
rv = "(unknown file)", 0, "(unknown function)"
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename == _srcfile:
f = f.f_back
continue
rv = (co.co_filename, f.f_lineno, co.co_name)
break
return rv
def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None, extra=None):
"""
A factory method which can be overridden in subclasses to create
specialized LogRecords.
"""
rv = LogRecord(name, level, fn, lno, msg, args, exc_info, func)
if extra is not None:
for key in extra:
if (key in ["message", "asctime"]) or (key in rv.__dict__):
raise KeyError("Attempt to overwrite %r in LogRecord" % key)
rv.__dict__[key] = extra[key]
return rv
def _log(self, level, msg, args, exc_info=None, extra=None):
"""
Low-level logging routine which creates a LogRecord and then calls
all the handlers of this logger to handle the record.
"""
if _srcfile:
#IronPython doesn't track Python frames, so findCaller raises an
#exception on some versions of IronPython. We trap it here so that
#IronPython can use logging.
try:
fn, lno, func = self.findCaller()
except ValueError:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
else:
fn, lno, func = "(unknown file)", 0, "(unknown function)"
if exc_info:
if not isinstance(exc_info, tuple):
exc_info = sys.exc_info()
record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra)
self.handle(record)
def handle(self, record):
"""
Call the handlers for the specified record.
This method is used for unpickled records received from a socket, as
well as those created locally. Logger-level filtering is applied.
"""
if (not self.disabled) and self.filter(record):
self.callHandlers(record)
def addHandler(self, hdlr):
"""
Add the specified handler to this logger.
"""
_acquireLock()
try:
if not (hdlr in self.handlers):
self.handlers.append(hdlr)
finally:
_releaseLock()
def removeHandler(self, hdlr):
"""
Remove the specified handler from this logger.
"""
_acquireLock()
try:
if hdlr in self.handlers:
self.handlers.remove(hdlr)
finally:
_releaseLock()
def callHandlers(self, record):
"""
Pass a record to all relevant handlers.
Loop through all handlers for this logger and its parents in the
logger hierarchy. If no handler was found, output a one-off error
message to sys.stderr. Stop searching up the hierarchy whenever a
logger with the "propagate" attribute set to zero is found - that
will be the last logger whose handlers are called.
"""
c = self
found = 0
while c:
for hdlr in c.handlers:
found = found + 1
if record.levelno >= hdlr.level:
hdlr.handle(record)
if not c.propagate:
c = None #break out
else:
c = c.parent
if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
sys.stderr.write("No handlers could be found for logger"
" \"%s\"\n" % self.name)
self.manager.emittedNoHandlerWarning = 1
def getEffectiveLevel(self):
"""
Get the effective level for this logger.
Loop through this logger and its parents in the logger hierarchy,
looking for a non-zero logging level. Return the first one found.
"""
logger = self
while logger:
if logger.level:
return logger.level
logger = logger.parent
return NOTSET
def isEnabledFor(self, level):
"""
Is this logger enabled for level 'level'?
"""
if self.manager.disable >= level:
return 0
return level >= self.getEffectiveLevel()
def getChild(self, suffix):
"""
Get a logger which is a descendant to this one.
This is a convenience method, such that
logging.getLogger('abc').getChild('def.ghi')
is the same as
logging.getLogger('abc.def.ghi')
It's useful, for example, when the parent logger is named using
__name__ rather than a literal string.
"""
if self.root is not self:
suffix = '.'.join((self.name, suffix))
return self.manager.getLogger(suffix)
class RootLogger(Logger):
"""
A root logger is not that different to any other logger, except that
it must have a logging level and there is only one instance of it in
the hierarchy.
"""
def __init__(self, level):
"""
Initialize the logger with the name "root".
"""
Logger.__init__(self, "root", level)
_loggerClass = Logger
class LoggerAdapter(object):
"""
An adapter for loggers which makes it easier to specify contextual
information in logging output.
"""
def __init__(self, logger, extra):
"""
Initialize the adapter with a logger and a dict-like object which
provides contextual information. This constructor signature allows
easy stacking of LoggerAdapters, if so desired.
You can effectively pass keyword arguments as shown in the
following example:
adapter = LoggerAdapter(someLogger, dict(p1=v1, p2="v2"))
"""
self.logger = logger
self.extra = extra
def process(self, msg, kwargs):
"""
Process the logging message and keyword arguments passed in to
a logging call to insert contextual information. You can either
manipulate the message itself, the keyword args or both. Return
the message and kwargs modified (or not) to suit your needs.
Normally, you'll only need to override this one method in a
LoggerAdapter subclass for your specific needs.
"""
kwargs["extra"] = self.extra
return msg, kwargs
def debug(self, msg, *args, **kwargs):
"""
Delegate a debug call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.debug(msg, *args, **kwargs)
def info(self, msg, *args, **kwargs):
"""
Delegate an info call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.info(msg, *args, **kwargs)
def warning(self, msg, *args, **kwargs):
"""
Delegate a warning call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.warning(msg, *args, **kwargs)
def error(self, msg, *args, **kwargs):
"""
Delegate an error call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.error(msg, *args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""
Delegate an exception call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
kwargs["exc_info"] = 1
self.logger.error(msg, *args, **kwargs)
def critical(self, msg, *args, **kwargs):
"""
Delegate a critical call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.critical(msg, *args, **kwargs)
def log(self, level, msg, *args, **kwargs):
"""
Delegate a log call to the underlying logger, after adding
contextual information from this adapter instance.
"""
msg, kwargs = self.process(msg, kwargs)
self.logger.log(level, msg, *args, **kwargs)
def isEnabledFor(self, level):
"""
See if the underlying logger is enabled for the specified level.
"""
return self.logger.isEnabledFor(level)
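# Editor's illustrative sketch (not part of the original module): a
# LoggerAdapter injecting per-request context through the 'extra' dict; a
# Formatter can then reference %(clientip)s. Defined but never invoked here.
def _example_logger_adapter():
    adapter = LoggerAdapter(getLogger("example.adapter"),
                            {"clientip": "203.0.113.7"})
    adapter.warning("connection reset")     # the record gains a clientip attribute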
root = RootLogger(WARNING)
Logger.root = root
Logger.manager = Manager(Logger.root)
#---------------------------------------------------------------------------
# Configuration classes and functions
#---------------------------------------------------------------------------
BASIC_FORMAT = "%(levelname)s:%(name)s:%(message)s"
def basicConfig(**kwargs):
"""
Do basic configuration for the logging system.
This function does nothing if the root logger already has handlers
configured. It is a convenience method intended for use by simple scripts
to do one-shot configuration of the logging package.
The default behaviour is to create a StreamHandler which writes to
sys.stderr, set a formatter using the BASIC_FORMAT format string, and
add the handler to the root logger.
A number of optional keyword arguments may be specified, which can alter
the default behaviour.
filename Specifies that a FileHandler be created, using the specified
filename, rather than a StreamHandler.
filemode Specifies the mode to open the file, if filename is specified
(if filemode is unspecified, it defaults to 'a').
format Use the specified format string for the handler.
datefmt Use the specified date/time format.
level Set the root logger level to the specified level.
stream Use the specified stream to initialize the StreamHandler. Note
that this argument is incompatible with 'filename' - if both
are present, 'stream' is ignored.
Note that you could specify a stream created using open(filename, mode)
rather than passing the filename and mode in. However, it should be
remembered that StreamHandler does not close its stream (since it may be
using sys.stdout or sys.stderr), whereas FileHandler closes its stream
when the handler is closed.
"""
# Add thread safety in case someone mistakenly calls
# basicConfig() from multiple threads
_acquireLock()
try:
if len(root.handlers) == 0:
filename = kwargs.get("filename")
if filename:
mode = kwargs.get("filemode", 'a')
hdlr = FileHandler(filename, mode)
else:
stream = kwargs.get("stream")
hdlr = StreamHandler(stream)
fs = kwargs.get("format", BASIC_FORMAT)
dfs = kwargs.get("datefmt", None)
fmt = Formatter(fs, dfs)
hdlr.setFormatter(fmt)
root.addHandler(hdlr)
level = kwargs.get("level")
if level is not None:
root.setLevel(level)
finally:
_releaseLock()
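# Editor's illustrative sketch (not part of the original module): one-shot
# configuration from a simple script; a second call is a no-op because the
# root logger already has a handler. The filename is hypothetical and the
# helper is never invoked at import time.
def _example_basicConfig():
    basicConfig(level=DEBUG,
                format="%(asctime)s %(levelname)s %(message)s",
                filename="/tmp/example.log",
                filemode="w")
    getLogger().debug("configured via basicConfig")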
#---------------------------------------------------------------------------
# Utility functions at module level.
# Basically delegate everything to the root logger.
#---------------------------------------------------------------------------
def getLogger(name=None):
"""
Return a logger with the specified name, creating it if necessary.
If no name is specified, return the root logger.
"""
if name:
return Logger.manager.getLogger(name)
else:
return root
#def getRootLogger():
# """
# Return the root logger.
#
# Note that getLogger('') now does the same thing, so this function is
# deprecated and may disappear in the future.
# """
# return root
def critical(msg, *args, **kwargs):
"""
Log a message with severity 'CRITICAL' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.critical(msg, *args, **kwargs)
fatal = critical
def error(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.error(msg, *args, **kwargs)
def exception(msg, *args, **kwargs):
"""
Log a message with severity 'ERROR' on the root logger,
with exception information.
"""
kwargs['exc_info'] = 1
error(msg, *args, **kwargs)
def warning(msg, *args, **kwargs):
"""
Log a message with severity 'WARNING' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.warning(msg, *args, **kwargs)
warn = warning
def info(msg, *args, **kwargs):
"""
Log a message with severity 'INFO' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.info(msg, *args, **kwargs)
def debug(msg, *args, **kwargs):
"""
Log a message with severity 'DEBUG' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.debug(msg, *args, **kwargs)
def log(level, msg, *args, **kwargs):
"""
Log 'msg % args' with the integer severity 'level' on the root logger.
"""
if len(root.handlers) == 0:
basicConfig()
root.log(level, msg, *args, **kwargs)
def disable(level):
"""
Disable all logging calls of severity 'level' and below.
"""
root.manager.disable = level
def shutdown(handlerList=_handlerList):
"""
Perform any cleanup actions in the logging system (e.g. flushing
buffers).
Should be called at application exit.
"""
for wr in reversed(handlerList[:]):
#errors might occur, for example, if files are locked
#we just ignore them if raiseExceptions is not set
try:
h = wr()
if h:
try:
h.acquire()
h.flush()
h.close()
except (IOError, ValueError):
# Ignore errors which might be caused
# because handlers have been closed but
# references to them are still around at
# application exit.
pass
finally:
h.release()
except:
if raiseExceptions:
raise
#else, swallow
#Let's try and shutdown automatically on application exit...
import atexit
atexit.register(shutdown)
# Null handler
class NullHandler(Handler):
"""
This handler does nothing. It's intended to be used to avoid the
"No handlers could be found for logger XXX" one-off warning. This is
important for library code, which may contain code to log events. If a user
of the library does not configure logging, the one-off warning might be
produced; to avoid this, the library developer simply needs to instantiate
a NullHandler and add it to the top-level logger of the library module or
package.
"""
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
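# Editor's illustrative sketch (not part of the original module): the pattern
# recommended for library authors - attach a NullHandler to the library's
# top-level logger so unconfigured applications never see the one-off
# "No handlers could be found" warning. 'mylibrary' is a hypothetical name.
def _example_null_handler():
    getLogger("mylibrary").addHandler(NullHandler())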
# Warnings integration
_warnings_showwarning = None
def _showwarning(message, category, filename, lineno, file=None, line=None):
"""
    Implementation of showwarning which redirects to logging, which will first
check to see if the file parameter is None. If a file is specified, it will
delegate to the original warnings implementation of showwarning. Otherwise,
it will call warnings.formatwarning and will log the resulting string to a
warnings logger named "py.warnings" with level logging.WARNING.
"""
if file is not None:
if _warnings_showwarning is not None:
_warnings_showwarning(message, category, filename, lineno, file, line)
else:
s = warnings.formatwarning(message, category, filename, lineno, line)
logger = getLogger("py.warnings")
if not logger.handlers:
logger.addHandler(NullHandler())
logger.warning("%s", s)
def captureWarnings(capture):
"""
If capture is true, redirect all warnings to the logging package.
If capture is False, ensure that warnings are not redirected to logging
but to their original destinations.
"""
global _warnings_showwarning
if capture:
if _warnings_showwarning is None:
_warnings_showwarning = warnings.showwarning
warnings.showwarning = _showwarning
else:
if _warnings_showwarning is not None:
warnings.showwarning = _warnings_showwarning
_warnings_showwarning = None
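# Editor's illustrative sketch (not part of the original module): routing
# warnings.warn() calls through the "py.warnings" logger and restoring the
# original behaviour afterwards. Defined but never invoked here.
def _example_captureWarnings():
    captureWarnings(True)
    warnings.warn("this goes to the py.warnings logger")
    captureWarnings(False)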
|
gpl-2.0
|
RafaelCosman/pybrain
|
pybrain/tools/customxml/networkreader.py
|
25
|
3676
|
from __future__ import print_function
__author__ = 'Tom Schaul, [email protected]'
from .handling import XMLHandling
# those imports are necessary for the eval() commands to find the right classes
import pybrain #@UnusedImport
from scipy import array #@UnusedImport
try:
import arac.pybrainbridge #@UnusedImport
except ImportError:
pass
class NetworkReader(XMLHandling):
""" A class that can take read a network from an XML file """
mothers = {}
modules = {}
@staticmethod
def readFrom(filename, name = None, index = 0):
""" append the network to an existing xml file
:key name: if this parameter is specified, read the network with this name
:key index: which network in the file shall be read (if there is more than one)
"""
r = NetworkReader(filename, newfile = False)
if name:
netroot = r.findNamedNode('Network', name)
else:
netroot = r.findNode('Network', index)
return r.readNetwork(netroot)
def readNetwork(self, node):
# TODO: why is this necessary?
import pybrain.structure.networks.custom #@Reimport @UnusedImport
nclass = eval(str(node.getAttribute('class')))
argdict = self.readArgs(node)
n = nclass(**argdict)
n.name = node.getAttribute('name')
for mnode in self.getChildrenOf(self.getChild(node, 'Modules')):
m, inmodule, outmodule = self.readModule(mnode)
if inmodule:
n.addInputModule(m)
elif outmodule:
n.addOutputModule(m)
else:
n.addModule(m)
mconns = self.getChild(node, 'MotherConnections')
if mconns:
for mcnode in self.getChildrenOf(mconns):
m = self.readBuildable(mcnode)
self.mothers[m.name] = m
for cnode in self.getChildrenOf(self.getChild(node, 'Connections')):
c, recurrent = self.readConnection(cnode)
if recurrent:
n.addRecurrentConnection(c)
else:
n.addConnection(c)
n.sortModules()
return n
def readModule(self, mnode):
if mnode.nodeName == 'Network':
m = self.readNetwork(mnode)
else:
m = self.readBuildable(mnode)
self.modules[m.name] = m
inmodule = mnode.hasAttribute('inmodule')
outmodule = mnode.hasAttribute('outmodule')
return m, inmodule, outmodule
def readConnection(self, cnode):
c = self.readBuildable(cnode)
recurrent = cnode.hasAttribute('recurrent')
return c, recurrent
def readBuildable(self, node):
mclass = node.getAttribute('class')
argdict = self.readArgs(node)
try:
m = eval(mclass)(**argdict)
except:
            print('Could not construct', mclass)
            print('with arguments:', argdict)
return None
m.name = node.getAttribute('name')
self.readParams(node, m)
return m
def readArgs(self, node):
res = {}
for c in self.getChildrenOf(node):
val = c.getAttribute('val')
if val in self.modules:
res[str(c.nodeName)] = self.modules[val]
elif val in self.mothers:
res[str(c.nodeName)] = self.mothers[val]
elif val != '':
res[str(c.nodeName)] = eval(val)
return res
def readParams(self, node, m):
pnode = self.getChild(node, 'Parameters')
if pnode:
params = eval(pnode.firstChild.data.strip())
m._setParameters(params)
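# Editor's illustrative sketch (not part of the original pybrain file): typical
# use of NetworkReader, assuming 'net.xml' was previously produced by the
# matching NetworkWriter class. The filename is hypothetical and the helper is
# never invoked here.
def _example_read_network():
    net = NetworkReader.readFrom('net.xml')
    return net.activate([0.5] * net.indim)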
|
bsd-3-clause
|
evernote/zing
|
pootle/apps/pootle_app/management/commands/refresh_scores.py
|
1
|
2645
|
# -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
import datetime
import os
os.environ["DJANGO_SETTINGS_MODULE"] = "pootle.settings"
from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from pootle_statistics.models import ScoreLog
class Command(BaseCommand):
help = "Refresh score"
def add_arguments(self, parser):
parser.add_argument(
"--reset",
action="store_true",
dest="reset",
default=False,
help="Reset all scores to zero",
)
parser.add_argument(
"--user", action="append", dest="users", help="User to refresh",
)
def handle(self, **options):
self.stdout.write("Start running of refresh_scores command...")
User = get_user_model()
users = User.objects.all()
if options["users"]:
users = users.filter(username__in=options["users"])
if options["reset"]:
users.update(score=0)
scorelogs = ScoreLog.objects.all()
if options["users"]:
scorelogs = scorelogs.filter(user__in=users)
scorelogs.delete()
if options["users"]:
self.stdout.write("Scores for specified users were reset to 0.")
else:
self.stdout.write("Scores for all users were reset to 0.")
return
start = datetime.datetime.now()
for user_pk, username in users.values_list("pk", "username"):
self.stdout.write("Processing user %s..." % username)
scorelog_qs = ScoreLog.objects.filter(user=user_pk).select_related(
"submission", "submission__suggestion", "submission__unit"
)
user_score = 0
for scorelog in scorelog_qs.iterator():
score_delta = scorelog.get_score_delta()
translated = scorelog.get_paid_wordcounts()[0]
user_score += score_delta
ScoreLog.objects.filter(id=scorelog.id).update(
score_delta=score_delta, translated_wordcount=translated
)
self.stdout.write("Score for user %s set to %.3f" % (username, user_score))
User.objects.filter(id=user_pk).update(score=user_score)
end = datetime.datetime.now()
self.stdout.write("All done in %s." % (end - start))
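# Editor's illustrative usage sketch (not part of the original command), shown
# with Django's generic manage.py entry point; the exact console script name
# may differ in a Zing/Pootle install:
#
#   python manage.py refresh_scores                       # recompute every user
#   python manage.py refresh_scores --user admin          # recompute one user
#   python manage.py refresh_scores --reset --user admin  # zero that user's scores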
|
gpl-3.0
|
aidanhs/servo
|
tests/wpt/web-platform-tests/tools/six/documentation/conf.py
|
420
|
7015
|
# -*- coding: utf-8 -*-
#
# six documentation build configuration file
import os
import sys
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.append(os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = "1.0"
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ["sphinx.ext.intersphinx"]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix of source filenames.
source_suffix = ".rst"
# The encoding of source files.
#source_encoding = "utf-8-sig"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = u"six"
copyright = u"2010-2014, Benjamin Peterson"
sys.path.append(os.path.abspath(os.path.join(".", "..")))
from six import __version__ as six_version
sys.path.pop()
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = six_version[:-2]
# The full version, including alpha/beta/rc tags.
release = six_version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ["_build"]
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "default"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'sixdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
("index", "six.tex", u"six Documentation",
u"Benjamin Peterson", "manual"),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
("index", "six", u"six Documentation",
[u"Benjamin Peterson"], 1)
]
# -- Intersphinx ---------------------------------------------------------------
intersphinx_mapping = {"py2" : ("https://docs.python.org/2/", None),
"py3" : ("https://docs.python.org/3/", None)}
|
mpl-2.0
|
salopensource/sal
|
server/plugins/munkiinfo/scripts/munkiinfo.py
|
2
|
1614
|
#!/usr/local/sal/Python.framework/Versions/3.8/bin/python3
import sys
import sal
sys.path.append('/usr/local/munki')
from munkilib import munkicommon
PREFS_TO_GET = (
'ManagedInstallDir',
'SoftwareRepoURL',
'ClientIdentifier',
'LogFile',
'LoggingLevel',
'LogToSyslog',
'InstallAppleSoftwareUpdates',
'AppleSoftwareUpdatesOnly',
'SoftwareUpdateServerURL',
'DaysBetweenNotifications',
'LastNotifiedDate',
'UseClientCertificate',
'SuppressUserNotification',
'SuppressAutoInstall',
'SuppressStopButtonOnInstall',
'PackageVerificationMode',
'FollowHTTPRedirects',
'UnattendedAppleUpdates',
'ClientCertificatePath',
'ClientKeyPath',
'LastAppleSoftwareUpdateCheck',
'LastCheckDate',
'LastCheckResult',
'LogFile',
'SoftwareRepoCACertificate',
'SoftwareRepoCAPath',
'PackageURL',
'CatalogURL',
'ManifestURL',
'IconURL',
'ClientResourceURL',
'ClientResourcesFilename',
'HelpURL',
'UseClientCertificateCNAsClientIdentifier',
'AdditionalHttpHeaders',
'SuppressLoginwindowInstall',
'InstallRequiresLogout',
'ShowRemovalDetail',
'MSULogEnabled',
'MSUDebugLogEnabled',
'LocalOnlyManifest',
'UnattendedAppleUpdates')
def main():
# Skip a manual check
if len(sys.argv) > 1:
if sys.argv[1] == 'manualcheck':
# Manual check: skipping MunkiInfo Plugin
exit(0)
data = {pref: str(munkicommon.pref(pref)) for pref in PREFS_TO_GET}
sal.add_plugin_results('MunkiInfo', data)
if __name__ == '__main__':
main()
|
apache-2.0
|
JshWright/home-assistant
|
homeassistant/components/notify/ciscospark.py
|
19
|
1851
|
"""
Cisco Spark platform for notify component.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.ciscospark/
"""
import logging
import voluptuous as vol
from homeassistant.components.notify import (
PLATFORM_SCHEMA, BaseNotificationService, ATTR_TITLE)
from homeassistant.const import (CONF_TOKEN)
import homeassistant.helpers.config_validation as cv
REQUIREMENTS = ['ciscosparkapi==0.4.2']
_LOGGER = logging.getLogger(__name__)
CONF_ROOMID = 'roomid'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_ROOMID): cv.string,
})
# pylint: disable=unused-variable
def get_service(hass, config, discovery_info=None):
"""Get the CiscoSpark notification service."""
return CiscoSparkNotificationService(
config.get(CONF_TOKEN),
config.get(CONF_ROOMID))
class CiscoSparkNotificationService(BaseNotificationService):
"""The Cisco Spark Notification Service."""
def __init__(self, token, default_room):
"""Initialize the service."""
from ciscosparkapi import CiscoSparkAPI
self._default_room = default_room
self._token = token
self._spark = CiscoSparkAPI(access_token=self._token)
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
from ciscosparkapi import SparkApiError
try:
title = ""
if kwargs.get(ATTR_TITLE) is not None:
title = kwargs.get(ATTR_TITLE) + ": "
self._spark.messages.create(roomId=self._default_room,
text=title + message)
except SparkApiError as api_error:
_LOGGER.error("Could not send CiscoSpark notification. Error: %s",
api_error)
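# Editor's illustrative configuration sketch (not part of the original file):
# a configuration.yaml entry matching the PLATFORM_SCHEMA above; the token and
# room id values are placeholders.
#
#   notify:
#     - platform: ciscospark
#       token: YOUR_SPARK_ACCESS_TOKEN
#       roomid: YOUR_SPARK_ROOM_ID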
|
apache-2.0
|
guojianli/code-for-blog
|
2008/libcollect.py
|
14
|
14080
|
"""
libcollect.py
Provides the LibCollect class, used for collecting the various libraries
your script uses for delivery as a self-contained distribution package.
Author: Eli Bendersky (http://eli.thegreenplace.net)
License: Same as Python
Motivation:
Imagine that you've written a script that uses several libraries, some of
which you've written and some you've downloaded and installed (for example
PyYAML). You want to distribute the script to your friends and co-workers,
who already have Python installed with all the standard library. But your
script won't run on their machines, because they have neither your personal
libraries, nor PyYAML installed. So what can you do ?
* You can ask them to install PyYAML and other libraries your script uses,
and send them your own libraries. This is a lengthy and inconvenient
process.
* You can use a tool like py2exe to package your delivery. This has a
downside, however. py2exe produces large files (several MBs) and you may
not want that.
* You can painstakingly collect the libraries into a directory where your
script can find them, and package the directory together with the script.
LibCollect makes the third option trivial, by doing all the dirty work
for you.
Example:
Suppose your script is named script.py, and is located in directory $DIR
(although I'm using Unix-y notation here, it is for convenience only.
LibCollect works similarly well on Windows platforms). Follow these steps
to prepare a self-contained distribution with LibCollect:
Create a distribution setup script in the same directory. Lets assume
you call it distrib_script.py. You can easily place it in any directory
you like, I'm using the same one to make the example simpler.
Add the following to distrib_script.py (assuming that libcollect.py is
in your sys.path):
**************************************************************
import libcollect
# Create a LibCollect object
lc = libcollect.LibCollect()
# Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname = 'script.py'
# Ask the resulting distribution to be placed in
# directory distrib
targetdir = 'distrib'
# Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes = ["wx",
"pywin",
"win32api",
"win32com"]
# This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect( scriptname,
targetdir,
excludes,
verbose=True)
**************************************************************
Now run distrib_script.py.
When it finishes running, you will see that the distrib directory
has been created in $DIR.
In $DIR/distrib you will see a file: script.py and
a directory: lib
* script.py is a loader that replaces your original script.py - this
is the program your users should run. All it does (look at the
code, it's short!) is prepare the sys.path to include the
packaged libraries, and runs your own script.py that was also
packaged into the .zip file
* lib is the distribution library, containing all the code
your script needs to run on any machine with Python installed,
and nothing else (except the modules you specified in the exclusion
list).
How to use LibCollect:
* It is most convenient to use LibCollect in the way demonstrated
in the example above. You may want to update your application from
time to time, and having a distribution script handy will turn
the preparation of a new distribution into a 5-second process.
* If you don't want to create a distribution script, you can use
a more direct method of invoking libcollect.py as a program on
your script. Call it without arguments and it will print
a usage string that will explain what you need to do.
How it works:
* LibCollect uses the standard modulefinder module to find out which
libraries are used by your script. It categorizes them into two
types: standard libraries that came with Python, and non-standard
libraries you've installed or written.
* Only libraries of the second type are included in the distribution
(bar the libraries you've explicitly asked to exclude).
* It then builds a directory with all the included libraries, in a
way that your script will be able to find them. The script itself
is also packaged into the same place.
* On request, this directory can be zipped into a single file, to
employ Python's built-in zipimport facility (but read the docstring
of the LibCollect class for more information on this)
* In the distribution directory, a new file with the name of your
script is created. It is a simple loader that uses the runpy module
to transparently load your script from the distribution library.
This way your script is not being modified (sys.path is rigged
from the loader).
Compatibility:
Written in pure Python 2.5
Tested on Windows and Linux, but should work on other platforms
where the standard Python distribution works.
Version history:
1.0 (2008.06.07): initial release
1.1 (2008.07.03): create an unzipped distribution library
by default, because of the limitations
of zipimport.
"""
from distutils.archive_util import make_zipfile
from distutils.dir_util import mkpath, create_tree
import distutils.sysconfig
import os, sys
import shutil
from modulefinder import ModuleFinder
version = "1.1"
class LibCollect(object):
""" See module documentation for an introduction and example.
Usage:
lc = LibCollect()
lc.do_collect(...)
The documentation of do_collect provides the gory details.
"""
def __init__(self):
pass
def do_collect(self, scriptname, targetdir, excludes=[], distlib='lib', zip_lib=False, verbose=False):
""" See module documentation for an introduction and example.
do_collect performs the actual work of this module.
Arguments:
scriptname Path to your top-level application file. Can be
either relative or absolute.
targetdir Path to the target directory where the packaged
distribution will be placed. The distribution
consists of a loader script and a distribution
library (either a directory or a zip file).
This directory need not exist prior to invocation.
If it exists, it will be overwritten.
excludes A list of module names for exclusion from the
distribution. For example, if you know all your
users have wxPython installed, you may want to
add 'wx' to this list - it will save a lot of
space.
distlib Name of the distribution library that will be
created in targetdir.
zip_lib True if you want the distribution library to be
zipped into a single file. False if you want it
to be an uncompressed directory.
Notes:
* While saving disk space, this option is likely
to hinder the start-up performance of the
script, because Python won't pre-compile the
.py files into .pyc files after the first load
if the .py files are in a zip archive.
* Due to a limitation of zipimport (Python's
built-in importer from zip files), your script
won't work after distribution if the library
contains extensions (.pyd & .pyo) or
console-less Windows scripts (.pyw). See the
documentation of zipimport for more information.
verbose True to make do_collect print out its progress
to stdout. May be useful for the first time you
create a distribution for some application.
Returns:
Nothing. An exception may be thrown with an error message from
one of the underlying method calls.
"""
self.site_packages = os.path.normcase(distutils.sysconfig.get_python_lib(standard_lib=False))
self.standard_lib = os.path.normcase(distutils.sysconfig.get_python_lib(standard_lib=True))
self.sys_prefix = os.path.normcase(sys.prefix)
self.verbose = verbose
self.log("\nLibCollect v%s running in verbose mode\n" % version)
# Initial preparation to create the lib directory
#
if os.path.exists(targetdir):
self.log("Directory '%s' exists. Removing it." % targetdir)
shutil.rmtree(targetdir)
libdir = os.path.join(targetdir, distlib)
self.log("Creating path '%s'" % libdir)
mkpath(libdir)
# Find the modules we need to collect
#
modules = self.find_modules(scriptname, excludes, verbose)
self.log("Collecting modules into '%s'" % libdir)
# Collect the modules in the lib directory
#
for modname, modtype, modfile in modules:
modname_components = modname.split('.')
if modtype == 'm':
if len(modname_components) > 1:
new_path = os.path.join(libdir, *modname_components[0:-1])
else:
new_path = libdir
elif modtype == 'P':
new_path = os.path.join(libdir, *modname_components)
else:
assert False
mkpath(new_path)
shutil.copy(modfile, new_path)
os.chdir(targetdir)
if zip_lib:
self.log("Zipping directory '%s' into '%s'" % (libdir, libdir + '.zip'))
make_zipfile(distlib, distlib)
self.log("Removing directory '%s'" % libdir)
shutil.rmtree(distlib)
path_add = "os.path.join('" + distlib + ".zip', '" + distlib + "')"
else:
path_add = "'" + distlib + "'"
# Create the loader script
#
self.log("Writing loader script: %s" % scriptname)
loader = open(os.path.basename(scriptname), 'w')
loader_name = os.path.splitext(scriptname)[0]
loader.write("import os, sys, runpy\n")
loader.write("sys.path.insert(0, %s)\n" % path_add)
loader.write("runpy.run_module('%s', run_name=\"__main__\", alter_sys=True)\n" % loader_name)
loader.close()
def find_modules(self, scriptname, excludes=[], verbose=False):
""" Find the modules we'd want to include in the
distribution.
"""
path = sys.path[:]
path.insert(0, os.path.dirname(scriptname))
mf = ModuleFinder(path=path, excludes=excludes)
mf.run_script(scriptname)
modulenames = mf.modules.keys()
modulenames.sort()
self.log("Looking for modules used by '%s'...\n" % scriptname)
log_format = "%-2s %-30s %s"
self.log(log_format % ('', 'Module name', 'Module location'))
self.log(log_format % ('--', '-' * 30, '-' * 30))
modules = []
for name in modulenames:
m = mf.modules[name]
# builtin
#
if not m.__file__: continue
mpath = os.path.normcase(m.__file__)
# Modules in Python distribution.
# Pass on only those that live in site-packages
#
if mpath.startswith(self.site_packages):
pass
elif mpath.startswith(self.sys_prefix):
continue
type = "P" if m.__path__ else "m"
modules.append((name, type, m.__file__))
self.log(log_format % (type, name, m.__file__))
self.log("")
return modules
def log(self, msg):
if self.verbose: print msg
if __name__ == "__main__":
from optparse import OptionParser
usage = "usage: %prog [options] script"
description = "Collect the script with the libraries it uses into a distribution. See module documentation for more details"
opts = OptionParser(usage=usage, description=description)
#~ opts.add_option("-h", "--help", action="help")
opts.add_option('-t', '--targetdir', dest='targetdir',
help='place distribution into TARGETDIR')
opts.add_option('-z', '--zip_lib', dest='zip_lib', action='store_true',
help='zip the distribution library')
opts.add_option('-v', '--verbose', dest='verbose', action='store_true',
help='print progress')
opts.add_option('-e', '--exclude', dest='excludes', action='append',
help='exclude library from distribution. You can provide several of these')
opts.set_defaults( targetdir='distrib',
zip_lib=True,
excludes=[],
verbose=False)
(options, args) = opts.parse_args()
if len(args) != 1:
opts.print_help()
sys.exit(0)
lc = LibCollect()
lc.do_collect( args[0],
options.targetdir,
options.excludes,
distlib='lib',
verbose=options.verbose,
zip_lib=options.zip_lib)
|
unlicense
|
hunter007/django
|
tests/check_framework/test_templates.py
|
288
|
1403
|
from copy import deepcopy
from django.core.checks.templates import E001
from django.test import SimpleTestCase
from django.test.utils import override_settings
class CheckTemplateSettingsAppDirsTest(SimpleTestCase):
TEMPLATES_APP_DIRS_AND_LOADERS = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'OPTIONS': {
'loaders': ['django.template.loaders.filesystem.Loader'],
},
},
]
@property
def func(self):
from django.core.checks.templates import check_setting_app_dirs_loaders
return check_setting_app_dirs_loaders
@override_settings(TEMPLATES=TEMPLATES_APP_DIRS_AND_LOADERS)
def test_app_dirs_and_loaders(self):
"""
Error if template loaders are specified and APP_DIRS is True.
"""
self.assertEqual(self.func(None), [E001])
def test_app_dirs_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['APP_DIRS']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
def test_loaders_removed(self):
TEMPLATES = deepcopy(self.TEMPLATES_APP_DIRS_AND_LOADERS)
del TEMPLATES[0]['OPTIONS']['loaders']
with self.settings(TEMPLATES=TEMPLATES):
self.assertEqual(self.func(None), [])
|
bsd-3-clause
|
saraivaufc/askMathPlus
|
askmath/models/history/answered_questions_historic.py
|
1
|
1398
|
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext as _
# History of the answers given to questions
class AnsweredQuestionsHistoric(models.Model):
discipline = models.ForeignKey('Discipline', related_name=_('Discipline'), verbose_name=_(u"Discipline"))
lesson = models.ForeignKey('Lesson', related_name=_('Lesson'), verbose_name=_(u"Lesson"))
question = models.ForeignKey('Question', related_name=_('Question'), verbose_name=_(u"Question"))
item = models.ForeignKey('Item', related_name=_('Item'), verbose_name=_(u"Item"))
hit = models.BooleanField(default=False, verbose_name=_(u"Hit"))
exists = models.BooleanField(default=True, verbose_name=_(u"Exists"))
creation = models.DateTimeField(verbose_name=_(u"Creation"), default=timezone.now)
def get_discipline(self):
return self.discipline
def get_lesson(self):
return self.lesson
def get_question(self):
return self.question
def get_item(self):
return self.item
def get_hit(self):
return self.hit
def __unicode__(self):
return unicode(self.discipline) + ' - ' + unicode(self.lesson) + ' - ' + unicode(self.question)
class Meta:
ordering = ['-creation']
verbose_name = _(u"Answered Question Historic")
verbose_name_plural = _(u"Answered Questions Historic")
|
gpl-2.0
|
adhoc-dev/sale-workflow
|
sale_order_type/__openerp__.py
|
13
|
2363
|
# -*- encoding: utf-8 -*-
##############################################################################
# #
# OpenERP, Open Source Management Solution. #
# #
# @author Carlos Sánchez Cifuentes <[email protected]> #
# #
# This program is free software: you can redistribute it and/or modify #
# it under the terms of the GNU Affero General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# This program is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU Affero General Public License for more details. #
# #
# You should have received a copy of the GNU Affero General Public License #
# along with this program. If not, see <http://www.gnu.org/licenses/>. #
# #
##############################################################################
{
"name": "Sale Order Types",
"version": "8.0.1.0.1",
"category": "Sales Management",
"author": "OdooMRP team, "
"Grupo Vermon, "
"AvanzOSC, "
"Serv. Tecnol. Avanzados - Pedro M. Baeza, "
"Odoo Community Association (OCA)",
"website": "http://www.odoomrp.com",
"license": "AGPL-3",
"depends": [
"sale",
"stock",
"sale_stock",
"account",
],
"demo": [
"demo/sale_order_demo.xml",
],
"data": [
"security/ir.model.access.csv",
"views/sale_order_view.xml",
"views/sale_order_type_view.xml",
"views/res_partner_view.xml",
"data/default_type.xml",
],
"installable": True,
}
|
agpl-3.0
|
keenlabs/KeenClient-Python
|
keen/tests/scoped_key_tests.py
|
1
|
1665
|
from keen import scoped_keys
from keen.tests.base_test_case import BaseTestCase
class ScopedKeyTests(BaseTestCase):
api_key = "24077ACBCB198BAAA2110EDDB673282F8E34909FD823A15C55A6253A664BE368"
bad_api_key = "24077ACBCB198BAAA2110EDDB673282F8E34909FD823A15C55A6253A664BE369"
old_api_key = "ab428324dbdbcfe744"
old_bad_api_key = "badbadbadbad"
options = {
"filters": [{
"property_name": "accountId",
"operator": "eq",
"property_value": "123456"
}]
}
def test_scoped_key_encrypts_and_decrypts(self):
encrypted = scoped_keys.encrypt(self.api_key, self.options)
decrypted = scoped_keys.decrypt(self.api_key, encrypted)
self.assert_equal(decrypted, self.options)
def test_scoped_key_fails_decryption_bad_key(self):
encrypted = scoped_keys.encrypt(self.api_key, self.options)
try:
scoped_keys.decrypt(self.bad_api_key, encrypted)
self.fail("shouldn't get here")
except ValueError as e:
self.assert_not_equal(e, None)
def test_old_scoped_key_encrypts_and_decrypts(self):
encrypted = scoped_keys.encrypt(self.old_api_key, self.options)
decrypted = scoped_keys.decrypt(self.old_api_key, encrypted)
self.assert_equal(decrypted, self.options)
def test_old_scoped_key_fails_decryption_on_bad_key(self):
encrypted = scoped_keys.encrypt(self.old_api_key, self.options)
try:
scoped_keys.decrypt(self.old_bad_api_key, encrypted)
self.fail("shouldn't get here")
except ValueError as e:
self.assert_not_equal(e, None)
|
mit
|
dlodato/AliPhysics
|
PWGMM/MC/aligenqa/aligenqa/utils.py
|
37
|
7697
|
import os
import random
import string
import subprocess
import re
from rootpy import asrootpy
from rootpy.plotting import Graph
def gen_random_name():
"""Generate a random name for temp hists"""
return ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(25))
def get_est_dirs(sums, considered_ests):
return (somedir for somedir in sums if somedir.GetName() in considered_ests)
def make_estimator_title(name):
if name == 'EtaLt05':
return '|#eta|#leq0.5'
elif name == 'EtaLt08':
return '|#eta|#leq0.8'
elif name == 'EtaLt15':
return '|#eta|#leq1.5'
elif name == 'Eta08_15':
return '0.8#leq|#eta|#leq1.5'
else:
return name
def remap_x_values(hist, corr_hist):
"""
Map the x values of hist to the y values of corr_hist.
In order to do so, it is necessary that the x values of hist are also present as x values in corr_hist.
Parameters
----------
hist : Hist1D
corr_hist : Hist2D
Correlations between the quantity on hist's x-axis (also corr_hist's x-axis) and the new
quantity to plot against (on corr_hist's y-axis).
Returns
-------
Graph
Graph of the remapped hist. Errors are ??? TODO
"""
hist = asrootpy(hist)
corr_hist = asrootpy(corr_hist)
profx = asrootpy(corr_hist.ProfileX(gen_random_name()))
rt_graph = Graph()
for i, (nch_ref_bin, counter_bin) in enumerate(zip(profx.bins(), hist.bins())):
rt_graph.SetPoint(i, nch_ref_bin.value, counter_bin.value)
xerr, yerr = nch_ref_bin.error / 2.0, counter_bin.error / 2.0
rt_graph.SetPointError(i, xerr, xerr, yerr, yerr)
return rt_graph
def remove_zero_value_points(g):
# Remove the points backwards, since the index would change if we do it forwards
# The first point has index 0!
points_to_remove = []
for i, (x, y) in enumerate(g):
if not y > 0.0:
points_to_remove.append(i)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_equal_x(g):
"""Remove all points which are on already occupied x values. Ie. The first point is kept, all later ones removed"""
points_to_remove = []
seen_x = []
for i, (x, y) in enumerate(g):
if x in seen_x:
points_to_remove.append(i)
else:
seen_x.append(x)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_points_with_x_err_gt_1NchRef(g):
npoints = g.GetN()
points_to_remove = []
for idx in xrange(0, npoints):
if g.GetErrorX(idx) > 1:
points_to_remove.append(idx)
for p in points_to_remove[::-1]:
g.RemovePoint(p)
def remove_non_mutual_points(g1, g2):
"""Remove all points with do no have a corresponding point at the same x-value in the other hist"""
points_to_remove1 = []
points_to_remove2 = []
xs1 = [p[0] for p in g1]
xs2 = [p[0] for p in g2]
for i, x in enumerate(xs1):
if x not in xs2:
points_to_remove1.append(i)
for i, x in enumerate(xs2):
if x not in xs1:
points_to_remove2.append(i)
for p in points_to_remove1[::-1]:
g1.RemovePoint(p)
for p in points_to_remove2[::-1]:
g2.RemovePoint(p)
def percentile_bin_to_binidx_bin(percentile_bin, event_counter):
"""
Converts a given percentile interval (eg. (.5, .4)) to an interval of bin numbers of the given
event_counter histogram.
Parameters
----------
percentile_bin : tuple
Two percentiles, each within 0-1. Needs to be decreasing
event_counter : Hist1D
Distribution of events over a classifier value
Returns
-------
tuple :
two bin numbers representing the given percentile. The first bin is inclusive, the second exclusive.
Ie. The bin numbers can be used directly in SetRange
Raises
------
ValueError :
The percentile specifies a range which is not found in the given event_counter histogram. It might be too
narrow.
"""
nbins = event_counter.GetXaxis().GetNbins()
ntotal_events = event_counter.Integral(1, nbins) # .Integral is a closed interval, as far as I can tell...
# fraction of events with greater or equal classifier values; hence decreasing values
frac_events_with_geq_classifier_value = [event_counter.Integral(binidx, nbins) / float(ntotal_events)
for binidx in range(1, nbins + 1)]
# small checks:
if frac_events_with_geq_classifier_value[0] != 1:
assert(0)
if len(frac_events_with_geq_classifier_value) != nbins:
assert(0)
# produce a list of bools, the first and last True are the first and last bin index
fraction_is_in_percentile_interval = lambda fraction: percentile_bin[0] >= fraction >= percentile_bin[1]
bin_is_in_percentile_interval = map(fraction_is_in_percentile_interval, frac_events_with_geq_classifier_value)
# get the indices of the elements that are True, sorry, this is a bit ugly
indices_of_bins_in_percentile_interval = [i for i, b in enumerate(bin_is_in_percentile_interval) if b]
# return the first and last binidx of the bins in the percentile interval; +1 for ROOT's 1-based bin indexing
try:
return (indices_of_bins_in_percentile_interval[0] + 1, indices_of_bins_in_percentile_interval[-1] + 1)
except IndexError:
# print "percentiles: "
# print frac_events_with_geq_classifier_value
raise ValueError("The given percentile interval did not match any bins in the given event_counter histogram")
def download_file(alien_path, local_path):
"""
Download a file from `alien_path` to `local_path`
Parameters
----------
alien_path, local_path : string
Full path to files
"""
if os.path.isfile(local_path):
raise ValueError("Local file exists")
try:
os.makedirs(os.path.dirname(local_path))
except OSError:
pass
alien_path = "alien:/" + alien_path
cp_cmd = ['alien_cp', '-v', '-s', alien_path, local_path]
p = subprocess.Popen(cp_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
p.wait()
if p.returncode != 0:
print "\n", p.stdout.read()
print("An error occued while downloading {0}; "
"The broken file was deleted.".format(local_path))
try:
os.remove(local_path)
except OSError:
pass
def get_generator_name_from_train(alien_path):
"""
Extract the generator name for an `AnalysisResults.root` file on alien_path.
Parameters
----------
alien_path :
Alien path to `AnalysisResults.root`
Returns
-------
str :
Generator name as stated in the train's `env.sh` file
"""
if not alien_path.startswith("alien:"):
alien_path = "alien:/" + alien_path
path_to_env = os.path.join(os.path.split(alien_path)[0], "..", "env.sh")
cp_cmd = ['alien_cp', '-v', '-s', path_to_env, ".env.sh"]
print "copying with: %s"%cp_cmd
subprocess.check_call(cp_cmd)
with open(".env.sh") as f:
for line in f.readlines():
if "PERIOD_NAME" in line:
gen_name = re.match(".*'(.+)'", line).groups()[-1]
break
return gen_name
def get_generator_name_from_filename(fname):
"""
Deduce the generator name from the file name as assigned when the
file was downloaded. Replace underscores with spaces.
"""
name = re.match(r'.*\d+_\d{8}-\d{4}-(.+)\.root$', fname).groups()[-1]
return name.replace("_", " ")
|
bsd-3-clause
|
danakj/chromium
|
chrome/common/extensions/docs/server2/branch_utility_test.py
|
77
|
7693
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import unittest
from branch_utility import BranchUtility, ChannelInfo
from fake_url_fetcher import FakeUrlFetcher
from object_store_creator import ObjectStoreCreator
from test_util import Server2Path
class BranchUtilityTest(unittest.TestCase):
def setUp(self):
self._branch_util = BranchUtility(
os.path.join('branch_utility', 'first.json'),
os.path.join('branch_utility', 'second.json'),
FakeUrlFetcher(Server2Path('test_data')),
ObjectStoreCreator.ForTest())
def testSplitChannelNameFromPath(self):
self.assertEquals(('stable', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'stable/extensions/stuff.html'))
self.assertEquals(('dev', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'dev/extensions/stuff.html'))
self.assertEquals(('beta', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'beta/extensions/stuff.html'))
self.assertEquals(('master', 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'master/extensions/stuff.html'))
self.assertEquals((None, 'extensions/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'extensions/stuff.html'))
self.assertEquals((None, 'apps/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'apps/stuff.html'))
self.assertEquals((None, 'extensions/dev/stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'extensions/dev/stuff.html'))
self.assertEquals((None, 'stuff.html'),
self._branch_util.SplitChannelNameFromPath(
'stuff.html'))
def testNewestChannel(self):
self.assertEquals('master',
self._branch_util.NewestChannel(('master', 'dev', 'beta', 'stable')))
self.assertEquals('master',
self._branch_util.NewestChannel(('stable', 'beta', 'dev', 'master')))
self.assertEquals('dev',
self._branch_util.NewestChannel(('stable', 'beta', 'dev')))
self.assertEquals('dev',
self._branch_util.NewestChannel(('dev', 'beta', 'stable')))
self.assertEquals('beta',
self._branch_util.NewestChannel(('beta', 'stable')))
self.assertEquals('beta',
self._branch_util.NewestChannel(('stable', 'beta')))
self.assertEquals('stable', self._branch_util.NewestChannel(('stable',)))
self.assertEquals('beta', self._branch_util.NewestChannel(('beta',)))
self.assertEquals('dev', self._branch_util.NewestChannel(('dev',)))
self.assertEquals('master', self._branch_util.NewestChannel(('master',)))
def testNewer(self):
oldest_stable_info = ChannelInfo('stable', '963', 17)
older_stable_info = ChannelInfo('stable', '1025', 18)
old_stable_info = ChannelInfo('stable', '1084', 19)
sort_of_old_stable_info = ChannelInfo('stable', '1500', 28)
stable_info = ChannelInfo('stable', '1547', 29)
beta_info = ChannelInfo('beta', '1599', 30)
dev_info = ChannelInfo('dev', '1612', 31)
master_info = ChannelInfo('master', 'master', 'master')
self.assertEquals(older_stable_info,
self._branch_util.Newer(oldest_stable_info))
self.assertEquals(old_stable_info,
self._branch_util.Newer(older_stable_info))
self.assertEquals(stable_info,
self._branch_util.Newer(sort_of_old_stable_info))
self.assertEquals(beta_info, self._branch_util.Newer(stable_info))
self.assertEquals(dev_info, self._branch_util.Newer(beta_info))
self.assertEquals(master_info, self._branch_util.Newer(dev_info))
# Test the upper limit.
self.assertEquals(None, self._branch_util.Newer(master_info))
def testOlder(self):
master_info = ChannelInfo('master', 'master', 'master')
dev_info = ChannelInfo('dev', '1612', 31)
beta_info = ChannelInfo('beta', '1599', 30)
stable_info = ChannelInfo('stable', '1547', 29)
old_stable_info = ChannelInfo('stable', '1500', 28)
older_stable_info = ChannelInfo('stable', '1453', 27)
oldest_stable_info = ChannelInfo('stable', '396', 5)
self.assertEquals(dev_info, self._branch_util.Older(master_info))
self.assertEquals(beta_info, self._branch_util.Older(dev_info))
self.assertEquals(stable_info, self._branch_util.Older(beta_info))
self.assertEquals(old_stable_info, self._branch_util.Older(stable_info))
self.assertEquals(older_stable_info,
self._branch_util.Older(old_stable_info))
# Test the lower limit.
self.assertEquals(None, self._branch_util.Older(oldest_stable_info))
def testGetChannelInfo(self):
master_info = ChannelInfo('master', 'master', 'master')
self.assertEquals(master_info, self._branch_util.GetChannelInfo('master'))
dev_info = ChannelInfo('dev', '1612', 31)
self.assertEquals(dev_info, self._branch_util.GetChannelInfo('dev'))
beta_info = ChannelInfo('beta', '1599', 30)
self.assertEquals(beta_info, self._branch_util.GetChannelInfo('beta'))
stable_info = ChannelInfo('stable', '1547', 29)
self.assertEquals(stable_info, self._branch_util.GetChannelInfo('stable'))
def testGetLatestVersionNumber(self):
self.assertEquals(37, self._branch_util.GetLatestVersionNumber())
def testGetBranchForVersion(self):
self.assertEquals('1500',
self._branch_util.GetBranchForVersion(28))
self.assertEquals('1453',
self._branch_util.GetBranchForVersion(27))
self.assertEquals('1410',
self._branch_util.GetBranchForVersion(26))
self.assertEquals('1364',
self._branch_util.GetBranchForVersion(25))
self.assertEquals('1312',
self._branch_util.GetBranchForVersion(24))
self.assertEquals('1271',
self._branch_util.GetBranchForVersion(23))
self.assertEquals('1229',
self._branch_util.GetBranchForVersion(22))
self.assertEquals('1180',
self._branch_util.GetBranchForVersion(21))
self.assertEquals('1132',
self._branch_util.GetBranchForVersion(20))
self.assertEquals('1084',
self._branch_util.GetBranchForVersion(19))
self.assertEquals('1025',
self._branch_util.GetBranchForVersion(18))
self.assertEquals('963',
self._branch_util.GetBranchForVersion(17))
self.assertEquals('696',
self._branch_util.GetBranchForVersion(11))
self.assertEquals('396',
self._branch_util.GetBranchForVersion(5))
def testGetChannelForVersion(self):
self.assertEquals('master',
self._branch_util.GetChannelForVersion('master'))
self.assertEquals('dev',
self._branch_util.GetChannelForVersion(31))
self.assertEquals('beta',
self._branch_util.GetChannelForVersion(30))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(26))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(22))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(18))
self.assertEquals('stable',
self._branch_util.GetChannelForVersion(14))
self.assertEquals(None,
self._branch_util.GetChannelForVersion(32))
self.assertEquals(None,
self._branch_util.GetChannelForVersion(42))
if __name__ == '__main__':
unittest.main()
|
bsd-3-clause
|
jonathanwcrane/boto
|
boto/ec2/blockdevicemapping.py
|
149
|
6372
|
# Copyright (c) 2009-2012 Mitch Garnaat http://garnaat.org/
# Copyright (c) 2012 Amazon.com, Inc. or its affiliates. All Rights Reserved
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
class BlockDeviceType(object):
"""
Represents parameters for a block device.
"""
def __init__(self,
connection=None,
ephemeral_name=None,
no_device=False,
volume_id=None,
snapshot_id=None,
status=None,
attach_time=None,
delete_on_termination=False,
size=None,
volume_type=None,
iops=None,
encrypted=None):
self.connection = connection
self.ephemeral_name = ephemeral_name
self.no_device = no_device
self.volume_id = volume_id
self.snapshot_id = snapshot_id
self.status = status
self.attach_time = attach_time
self.delete_on_termination = delete_on_termination
self.size = size
self.volume_type = volume_type
self.iops = iops
self.encrypted = encrypted
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
lname = name.lower()
if name == 'volumeId':
self.volume_id = value
elif lname == 'virtualname':
self.ephemeral_name = value
elif lname == 'nodevice':
self.no_device = (value == 'true')
elif lname == 'snapshotid':
self.snapshot_id = value
elif lname == 'volumesize':
self.size = int(value)
elif lname == 'status':
self.status = value
elif lname == 'attachtime':
self.attach_time = value
elif lname == 'deleteontermination':
self.delete_on_termination = (value == 'true')
elif lname == 'volumetype':
self.volume_type = value
elif lname == 'iops':
self.iops = int(value)
elif lname == 'encrypted':
self.encrypted = (value == 'true')
else:
setattr(self, name, value)
# for backwards compatibility
EBSBlockDeviceType = BlockDeviceType
class BlockDeviceMapping(dict):
"""
Represents a collection of BlockDeviceTypes when creating ec2 instances.
Example:
dev_sda1 = BlockDeviceType()
dev_sda1.size = 100 # change root volume to 100GB instead of default
bdm = BlockDeviceMapping()
bdm['/dev/sda1'] = dev_sda1
reservation = image.run(..., block_device_map=bdm, ...)
"""
def __init__(self, connection=None):
"""
:type connection: :class:`boto.ec2.EC2Connection`
:param connection: Optional connection.
"""
dict.__init__(self)
self.connection = connection
self.current_name = None
self.current_value = None
def startElement(self, name, attrs, connection):
lname = name.lower()
if lname in ['ebs', 'virtualname']:
self.current_value = BlockDeviceType(self)
return self.current_value
def endElement(self, name, value, connection):
lname = name.lower()
if lname in ['device', 'devicename']:
self.current_name = value
elif lname in ['item', 'member']:
self[self.current_name] = self.current_value
def ec2_build_list_params(self, params, prefix=''):
pre = '%sBlockDeviceMapping' % prefix
return self._build_list_params(params, prefix=pre)
def autoscale_build_list_params(self, params, prefix=''):
pre = '%sBlockDeviceMappings.member' % prefix
return self._build_list_params(params, prefix=pre)
def _build_list_params(self, params, prefix=''):
i = 1
for dev_name in self:
pre = '%s.%d' % (prefix, i)
params['%s.DeviceName' % pre] = dev_name
block_dev = self[dev_name]
if block_dev.ephemeral_name:
params['%s.VirtualName' % pre] = block_dev.ephemeral_name
else:
if block_dev.no_device:
params['%s.NoDevice' % pre] = ''
else:
if block_dev.snapshot_id:
params['%s.Ebs.SnapshotId' % pre] = block_dev.snapshot_id
if block_dev.size:
params['%s.Ebs.VolumeSize' % pre] = block_dev.size
if block_dev.delete_on_termination:
params['%s.Ebs.DeleteOnTermination' % pre] = 'true'
else:
params['%s.Ebs.DeleteOnTermination' % pre] = 'false'
if block_dev.volume_type:
params['%s.Ebs.VolumeType' % pre] = block_dev.volume_type
if block_dev.iops is not None:
params['%s.Ebs.Iops' % pre] = block_dev.iops
# The encrypted flag (even if False) cannot be specified for the root EBS
# volume.
if block_dev.encrypted is not None:
if block_dev.encrypted:
params['%s.Ebs.Encrypted' % pre] = 'true'
else:
params['%s.Ebs.Encrypted' % pre] = 'false'
i += 1
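# Illustrative sketch (not part of boto): for the BlockDeviceMapping example in
# the class docstring above, calling bdm.ec2_build_list_params(params) would
# populate entries along the lines of:
#   params['BlockDeviceMapping.1.DeviceName'] = '/dev/sda1'
#   params['BlockDeviceMapping.1.Ebs.VolumeSize'] = 100
#   params['BlockDeviceMapping.1.Ebs.DeleteOnTermination'] = 'false'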
|
mit
|
YongseopKim/crosswalk-test-suite
|
webapi/tct-csp-w3c-tests/csp-py/csp_font-src_cross-origin_allowed-manual.py
|
30
|
2615
|
def main(request, response):
import simplejson as json
f = file('config.json')
source = f.read()
s = json.JSONDecoder().decode(source)
url1 = "http://" + s['host'] + ":" + str(s['ports']['http'][1])
_CSP = "font-src " + url1
response.headers.set("Content-Security-Policy", _CSP)
response.headers.set("X-Content-Security-Policy", _CSP)
response.headers.set("X-WebKit-CSP", _CSP)
return """<!DOCTYPE html>
<!--
Copyright (c) 2013 Intel Corporation.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of works must retain the original copyright notice, this list
of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the original copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this work without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
Authors:
Hao, Yunfei <[email protected]>
-->
<html>
<head>
<title>CSP Test: csp_font-src_cross-origin_allowed</title>
<link rel="author" title="Intel" href="http://www.intel.com"/>
<link rel="help" href="http://www.w3.org/TR/2012/CR-CSP-20121115/#font-src"/>
<meta name="flags" content=""/>
<meta charset="utf-8"/>
<style>
@font-face {
font-family: Canvas;
src: url('""" + url1 + """/tests/csp/support/w3c/CanvasTest.ttf');
}
#test {
font-family: Canvas;
}
</style>
</head>
<body>
<p>Test passes if the two lines are different in font</p>
<div id="test">1234 ABCD</div>
<div>1234 ABCD</div>
</body>
</html> """
|
bsd-3-clause
|
palerdot/calibre
|
src/calibre/ebooks/oeb/polish/cover.py
|
2
|
15201
|
#!/usr/bin/env python
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
import shutil, re, os
from calibre.ebooks.oeb.base import OPF, OEB_DOCS, XPath, XLINK, xml2text
from calibre.ebooks.oeb.polish.replace import replace_links
from calibre.utils.magick.draw import identify
def set_azw3_cover(container, cover_path, report):
name = None
found = True
for gi in container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]'):
href = gi.get('href')
name = container.href_to_name(href, container.opf_name)
container.remove_from_xml(gi)
if name is None or not container.has_name(name):
item = container.generate_item(name='cover.jpeg', id_prefix='cover')
name = container.href_to_name(item.get('href'), container.opf_name)
found = False
href = container.name_to_href(name, container.opf_name)
guide = container.opf_xpath('//opf:guide')[0]
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), href=href, type='cover'))
with open(cover_path, 'rb') as src, container.open(name, 'wb') as dest:
shutil.copyfileobj(src, dest)
container.dirty(container.opf_name)
report('Cover updated' if found else 'Cover inserted')
def get_azw3_raster_cover_name(container):
items = container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]')
if items:
return container.href_to_name(items[0].get('href'))
def mark_as_cover_azw3(container, name):
href = container.name_to_href(name, container.opf_name)
found = False
for item in container.opf_xpath('//opf:guide/opf:reference[@href and contains(@type, "cover")]'):
item.set('href', href)
found = True
if not found:
for guide in container.opf_xpath('//opf:guide'):
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), href=href, type='cover'))
container.dirty(container.opf_name)
def get_raster_cover_name(container):
if container.book_type == 'azw3':
return get_azw3_raster_cover_name(container)
return find_cover_image(container, strict=True)
def get_cover_page_name(container):
if container.book_type == 'azw3':
return
return find_cover_page(container)
def set_cover(container, cover_path, report):
if container.book_type == 'azw3':
set_azw3_cover(container, cover_path, report)
else:
set_epub_cover(container, cover_path, report)
def mark_as_cover(container, name):
if name not in container.mime_map:
raise ValueError('Cannot mark %s as cover as it does not exist' % name)
mt = container.mime_map[name]
if not is_raster_image(mt):
raise ValueError('Cannot mark %s as the cover image as it is not a raster image' % name)
if container.book_type == 'azw3':
mark_as_cover_azw3(container, name)
else:
mark_as_cover_epub(container, name)
###############################################################################
# The delightful EPUB cover processing
def is_raster_image(media_type):
return media_type and media_type.lower() in {
'image/png', 'image/jpeg', 'image/jpg', 'image/gif'}
COVER_TYPES = {
'coverimagestandard', 'other.ms-coverimage-standard',
'other.ms-titleimage-standard', 'other.ms-titleimage',
'other.ms-coverimage', 'other.ms-thumbimage-standard',
'other.ms-thumbimage', 'thumbimagestandard', 'cover'}
def find_cover_image(container, strict=False):
'Find a raster image marked as a cover in the OPF'
manifest_id_map = container.manifest_id_map
mm = container.mime_map
for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
item_id = meta.get('content')
name = manifest_id_map.get(item_id, None)
media_type = mm.get(name, None)
if is_raster_image(media_type):
return name
# First look for a guide item with type == 'cover'
guide_type_map = container.guide_type_map
for ref_type, name in guide_type_map.iteritems():
if ref_type.lower() == 'cover' and is_raster_image(mm.get(name, None)):
return name
if strict:
return
# Find the largest image from all possible guide cover items
largest_cover = (None, 0)
for ref_type, name in guide_type_map.iteritems():
if ref_type.lower() in COVER_TYPES and is_raster_image(mm.get(name, None)):
path = container.name_path_map.get(name, None)
if path:
sz = os.path.getsize(path)
if sz > largest_cover[1]:
largest_cover = (name, sz)
if largest_cover[0]:
return largest_cover[0]
def get_guides(container):
guides = container.opf_xpath('//opf:guide')
if not guides:
container.insert_into_xml(container.opf, container.opf.makeelement(
OPF('guide')))
guides = container.opf_xpath('//opf:guide')
return guides
def mark_as_cover_epub(container, name):
mmap = {v:k for k, v in container.manifest_id_map.iteritems()}
if name not in mmap:
raise ValueError('Cannot mark %s as cover as it is not in manifest' % name)
mid = mmap[name]
# Remove all entries from the opf that identify a raster image as cover
for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
container.remove_from_xml(meta)
for ref in container.opf_xpath('//opf:guide/opf:reference[@href and @type]'):
if ref.get('type').lower() not in COVER_TYPES:
continue
name = container.href_to_name(ref.get('href'), container.opf_name)
mt = container.mime_map.get(name, None)
if is_raster_image(mt):
container.remove_from_xml(ref)
# Add reference to image in <metadata>
for metadata in container.opf_xpath('//opf:metadata'):
m = metadata.makeelement(OPF('meta'), name='cover', content=mid)
container.insert_into_xml(metadata, m)
# If no entry for titlepage exists in guide, insert one that points to this
# image
if not container.opf_xpath('//opf:guide/opf:reference[@type="cover"]'):
for guide in get_guides(container):
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), type='cover', href=container.name_to_href(name, container.opf_name)))
container.dirty(container.opf_name)
def mark_as_titlepage(container, name, move_to_start=True):
if move_to_start:
for item, q, linear in container.spine_iter:
if name == q:
break
if not linear:
item.set('linear', 'yes')
if item.getparent().index(item) > 0:
container.insert_into_xml(item.getparent(), item, 0)
for ref in container.opf_xpath('//opf:guide/opf:reference[@type="cover"]'):
ref.getparent().remove(ref)
for guide in get_guides(container):
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), type='cover', href=container.name_to_href(name, container.opf_name)))
container.dirty(container.opf_name)
def find_cover_page(container):
'Find a document marked as a cover in the OPF'
mm = container.mime_map
guide_type_map = container.guide_type_map
for ref_type, name in guide_type_map.iteritems():
if ref_type.lower() == 'cover' and mm.get(name, '').lower() in OEB_DOCS:
return name
def find_cover_image_in_page(container, cover_page):
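# Return the image referenced by cover_page if that page is a simple wrapper
# around a single image (i.e. it has no other textual content); otherwise None.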
root = container.parsed(cover_page)
body = XPath('//h:body')(root)
if len(body) != 1:
return
body = body[0]
images = []
for img in XPath('descendant::h:img[@src]|descendant::svg:svg/descendant::svg:image')(body):
href = img.get('src') or img.get(XLINK('href'))
if href:
name = container.href_to_name(href, base=cover_page)
images.append(name)
text = re.sub(r'\s+', '', xml2text(body))
if text or len(images) > 1:
# Document has more content than a single image
return
if images:
return images[0]
def clean_opf(container):
'Remove all references to covers from the OPF'
manifest_id_map = container.manifest_id_map
for meta in container.opf_xpath('//opf:meta[@name="cover" and @content]'):
name = manifest_id_map.get(meta.get('content', None), None)
container.remove_from_xml(meta)
if name and name in container.name_path_map:
yield name
gtm = container.guide_type_map
for ref in container.opf_xpath('//opf:guide/opf:reference[@type]'):
typ = ref.get('type', '')
if typ.lower() in COVER_TYPES:
container.remove_from_xml(ref)
name = gtm.get(typ, None)
if name and name in container.name_path_map:
yield name
container.dirty(container.opf_name)
def create_epub_cover(container, cover_path):
from calibre.ebooks.conversion.config import load_defaults
from calibre.ebooks.oeb.transforms.cover import CoverManager
ext = cover_path.rpartition('.')[-1].lower()
raster_cover_item = container.generate_item('cover.'+ext, id_prefix='cover')
raster_cover = container.href_to_name(raster_cover_item.get('href'),
container.opf_name)
with open(cover_path, 'rb') as src, container.open(raster_cover, 'wb') as dest:
shutil.copyfileobj(src, dest)
opts = load_defaults('epub_output')
keep_aspect = opts.get('preserve_cover_aspect_ratio', False)
no_svg = opts.get('no_svg_cover', False)
if no_svg:
style = 'style="height: 100%%"'
templ = CoverManager.NONSVG_TEMPLATE.replace('__style__', style)
else:
width, height = 600, 800
try:
width, height = identify(cover_path)[:2]
except:
container.log.exception("Failed to get width and height of cover")
ar = 'xMidYMid meet' if keep_aspect else 'none'
templ = CoverManager.SVG_TEMPLATE.replace('__ar__', ar)
templ = templ.replace('__viewbox__', '0 0 %d %d'%(width, height))
templ = templ.replace('__width__', str(width))
templ = templ.replace('__height__', str(height))
titlepage_item = container.generate_item('titlepage.xhtml',
id_prefix='titlepage')
titlepage = container.href_to_name(titlepage_item.get('href'),
container.opf_name)
raw = templ%container.name_to_href(raster_cover).encode('utf-8')
with container.open(titlepage, 'wb') as f:
f.write(raw)
# We have to make sure the raster cover item has id="cover" for the moron
# that wrote the Nook firmware
if raster_cover_item.get('id') != 'cover':
from calibre.ebooks.oeb.base import uuid_id
newid = uuid_id()
for item in container.opf_xpath('//*[@id="cover"]'):
item.set('id', newid)
for item in container.opf_xpath('//*[@idref="cover"]'):
item.set('idref', newid)
raster_cover_item.set('id', 'cover')
spine = container.opf_xpath('//opf:spine')[0]
ref = spine.makeelement(OPF('itemref'), idref=titlepage_item.get('id'))
container.insert_into_xml(spine, ref, index=0)
guide = container.opf_get_or_create('guide')
container.insert_into_xml(guide, guide.makeelement(
OPF('reference'), type='cover', title=_('Cover'),
href=container.name_to_href(titlepage, base=container.opf_name)))
metadata = container.opf_get_or_create('metadata')
meta = metadata.makeelement(OPF('meta'), name='cover')
meta.set('content', raster_cover_item.get('id'))
container.insert_into_xml(metadata, meta)
return raster_cover, titlepage
def remove_cover_image_in_page(container, page, cover_images):
for img in container.parsed(page).xpath('//*[local-name()="img" and @src]'):
href = img.get('src')
name = container.href_to_name(href, page)
if name in cover_images:
img.getparent().remove(img)
break
def set_epub_cover(container, cover_path, report):
cover_image = find_cover_image(container)
cover_page = find_cover_page(container)
wrapped_image = extra_cover_page = None
updated = False
log = container.log
possible_removals = set(clean_opf(container))
possible_removals
# TODO: Handle possible_removals and also iterate over links in the removed
# pages and handle possibly removing stylesheets referred to by them.
spine_items = tuple(container.spine_items)
if cover_page is None:
# Check if the first item in the spine is a simple cover wrapper
candidate = container.abspath_to_name(spine_items[0])
if find_cover_image_in_page(container, candidate) is not None:
cover_page = candidate
if cover_page is not None:
log('Found existing cover page')
wrapped_image = find_cover_image_in_page(container, cover_page)
if len(spine_items) > 1:
# Look for an extra cover page
c = container.abspath_to_name(spine_items[1])
if c != cover_page:
candidate = find_cover_image_in_page(container, c)
if candidate and candidate in {wrapped_image, cover_image}:
log('Found an extra cover page that is a simple wrapper, removing it')
# This page has only a single image and that image is the
# cover image, remove it.
container.remove_item(c)
extra_cover_page = c
spine_items = spine_items[:1] + spine_items[2:]
elif candidate is None:
# Remove the cover image if it is the first image in this
# page
remove_cover_image_in_page(container, c, {wrapped_image,
cover_image})
if wrapped_image is not None:
# The cover page is a simple wrapper around a single cover image,
# we can remove it safely.
log('Existing cover page is a simple wrapper, removing it')
container.remove_item(cover_page)
container.remove_item(wrapped_image)
updated = True
if cover_image and cover_image != wrapped_image:
# Remove the old cover image
container.remove_item(cover_image)
# Insert the new cover
raster_cover, titlepage = create_epub_cover(container, cover_path)
report('Cover updated' if updated else 'Cover inserted')
# Replace links to the old cover image/cover page
link_sub = {s:d for s, d in {
cover_page:titlepage, wrapped_image:raster_cover,
cover_image:raster_cover, extra_cover_page:titlepage}.iteritems()
if s is not None}
if link_sub:
replace_links(container, link_sub, frag_map=lambda x, y:None)
|
gpl-3.0
|
reinhrst/ycmd
|
cpp/ycm/tests/gmock/scripts/generator/cpp/keywords.py
|
1157
|
2004
|
#!/usr/bin/env python
#
# Copyright 2007 Neal Norwitz
# Portions Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""C++ keywords and helper utilities for determining keywords."""
__author__ = '[email protected] (Neal Norwitz)'
try:
# Python 3.x
import builtins
except ImportError:
# Python 2.x
import __builtin__ as builtins
if not hasattr(builtins, 'set'):
# Nominal support for Python 2.3.
from sets import Set as set
TYPES = set('bool char int long short double float void wchar_t unsigned signed'.split())
TYPE_MODIFIERS = set('auto register const inline extern static virtual volatile mutable'.split())
ACCESS = set('public protected private friend'.split())
CASTS = set('static_cast const_cast dynamic_cast reinterpret_cast'.split())
OTHERS = set('true false asm class namespace using explicit this operator sizeof'.split())
OTHER_TYPES = set('new delete typedef struct union enum typeid typename template'.split())
CONTROL = set('case switch default if else return goto'.split())
EXCEPTION = set('try catch throw'.split())
LOOP = set('while do for break continue'.split())
ALL = TYPES | TYPE_MODIFIERS | ACCESS | CASTS | OTHERS | OTHER_TYPES | CONTROL | EXCEPTION | LOOP
def IsKeyword(token):
return token in ALL
def IsBuiltinType(token):
if token in ('virtual', 'inline'):
# These only apply to methods, they can't be types by themselves.
return False
return token in TYPES or token in TYPE_MODIFIERS
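# Illustrative examples of the helpers above (hedged, not from the original module):
#   IsKeyword('while')        -> True
#   IsBuiltinType('unsigned') -> True
#   IsBuiltinType('virtual')  -> False  (applies only to methods, not types)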
|
gpl-3.0
|
caisq/tensorflow
|
tensorflow/contrib/lookup/lookup_ops_test.py
|
6
|
91453
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf.contrib.lookup.lookup."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import six
from tensorflow.contrib import lookup
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import lookup_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver
from tensorflow.python.training import server_lib
class HashTableOpTest(test.TestCase):
def testHashTable(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys_tensor, exported_values_tensor = table.export()
self.assertItemsEqual([b"brain", b"salad", b"surgery"],
exported_keys_tensor.eval())
self.assertItemsEqual([0, 1, 2], exported_values_tensor.eval())
def testHashTableFindHighRank(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testHashTableInitWithPythonArrays(self):
with self.test_session():
default_val = -1
keys = ["brain", "salad", "surgery"]
values = [0, 1, 2]
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
keys, values, value_dtype=dtypes.int64),
default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableInitWithNumPyArrays(self):
with self.test_session():
default_val = -1
keys = np.array(["brain", "salad", "surgery"], dtype=np.str)
values = np.array([0, 1, 2], dtype=np.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testMultipleHashTables(self):
with self.test_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table2 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table3 = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
lookup_ops.tables_initializer().run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testHashTableWithTensorDefault(self):
with self.test_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testHashTableWithSparseTensorInput(self):
with self.test_session() as sess:
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
sp_indices = [[0, 0], [0, 1], [1, 0]]
sp_shape = [2, 2]
input_tensor = sparse_tensor.SparseTensor(
constant_op.constant(sp_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "tank"]),
constant_op.constant(sp_shape, dtypes.int64))
output = table.lookup(input_tensor)
out_indices, out_values, out_shape = sess.run(output)
self.assertAllEqual([0, 1, -1], out_values)
self.assertAllEqual(sp_indices, out_indices)
self.assertAllEqual(sp_shape, out_shape)
def testSignatureMismatch(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
# Ref types do not produce a lookup signature mismatch.
input_string_ref = variables.Variable("brain")
variables.global_variables_initializer().run()
self.assertEqual(0, table.lookup(input_string_ref).eval())
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string)
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), "UNK")
def testDTypes(self):
with self.test_session():
default_val = -1
with self.assertRaises(TypeError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(["a"], [1], [dtypes.string],
dtypes.int64), default_val)
def testNotInitialized(self):
with self.test_session():
default_val = -1
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(
["a"], [1], value_dtype=dtypes.int64),
default_val)
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
with self.assertRaisesOpError("Table not initialized"):
output.eval()
def testInitializeTwice(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
table.init.run()
with self.assertRaisesOpError("Table already initialized"):
table.init.run()
def testInitializationWithInvalidDimensions(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2, 3, 4], dtypes.int64)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values), default_val)
def testMultipleSessions(self):
# Start a server
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.HashTable(
lookup.KeyValueTensorInitializer(keys, values),
default_val,
name="t1")
# Init the table in the first session.
with session1:
table.init.run()
self.assertAllEqual(3, table.size().eval())
# Init the table in the second session and verify that we do not get a
# "Table already initialized" error.
with session2:
table.init.run()
self.assertAllEqual(3, table.size().eval())
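# Tests for lookup.MutableHashTable: insert/lookup/export, save/restore, and dtype checks.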
class MutableHashTableOpTest(test.TestCase):
def testMutableHashTable(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([0, 1, 2], sorted_values)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=ops.Graph()) as sess:
v0 = variables.Variable(10.0, name="v0")
v1 = variables.Variable(20.0, name="v1")
default_val = -1
keys = constant_op.constant(["b", "c", "d"], dtypes.string)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
save = saver.Saver()
variables.global_variables_initializer().run()
# Check that the parameter nodes have been initialized.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=ops.Graph()) as sess:
v0 = variables.Variable(-1.0, name="v0")
v1 = variables.Variable(-1.0, name="v1")
default_val = -1
table = lookup.MutableHashTable(
dtypes.string, dtypes.int64, default_val, name="t1", checkpoint=True)
table.insert(
constant_op.constant(["a", "c"], dtypes.string),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
# Check that the parameter nodes have been restored.
self.assertEqual(10.0, v0.eval())
self.assertEqual(20.0, v1.eval())
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["a", "b", "c", "d", "e"],
dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
def testSharing(self):
# Start a server to store the table state
server = server_lib.Server(
{
"local0": ["localhost:0"]
}, protocol="grpc", start=True)
# Create two sessions sharing the same state
session1 = session.Session(server.target)
session2 = session.Session(server.target)
table = lookup.MutableHashTable(
dtypes.int64, dtypes.string, "-", name="t1")
# Populate the table in the first session
with session1:
self.assertAllEqual(0, table.size().eval())
keys = constant_op.constant([11, 12], dtypes.int64)
values = constant_op.constant(["a", "b"])
table.insert(keys, values).run()
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([11, 12, 13], dtypes.int64))
self.assertAllEqual([b"a", b"b", b"-"], output.eval())
# Verify that we can access the shared data from the second session
with session2:
self.assertAllEqual(2, table.size().eval())
output = table.lookup(constant_op.constant([10, 11, 12], dtypes.int64))
self.assertAllEqual([b"-", b"a", b"b"], output.eval())
def testMutableHashTableOfTensors(self):
with self.test_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
self.assertAllEqual([3, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [2, 3], [-1, -1]], result)
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None, 2], exported_values.get_shape().as_list())
# exported data is in the order of the internal map, i.e. undefined
sorted_keys = np.sort(exported_keys.eval())
sorted_values = np.sort(exported_values.eval())
self.assertAllEqual([b"brain", b"salad", b"surgery"], sorted_keys)
self.assertAllEqual([[4, 5], [2, 3], [0, 1]], sorted_values)
def testMutableHashTableExportInsert(self):
with self.test_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table1.size().eval())
table1.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
expected_output = [[0, 1], [2, 3], [-1, -1]]
output1 = table1.lookup(input_string)
self.assertAllEqual(expected_output, output1.eval())
exported_keys, exported_values = table1.export()
self.assertAllEqual(3, exported_keys.eval().size)
self.assertAllEqual(6, exported_values.eval().size)
# Populate a second table from the exported data
table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table2.size().eval())
table2.insert(exported_keys, exported_values).run()
self.assertAllEqual(3, table2.size().eval())
# Verify lookup result is still the same
output2 = table2.lookup(input_string)
self.assertAllEqual(expected_output, output2.eval())
def testMutableHashTableOfTensorsInvalidShape(self):
with self.test_session():
default_val = constant_op.constant([-1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# Shape [6] instead of [3, 2]
values = constant_op.constant([0, 1, 2, 3, 4, 5], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2,3] instead of [3, 2]
values = constant_op.constant([[0, 1, 2], [3, 4, 5]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [2, 2] instead of [3, 2]
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Shape [3, 1] instead of [3, 2]
values = constant_op.constant([[0], [2], [4]], dtypes.int64)
with self.assertRaisesOpError("Expected shape"):
table.insert(keys, values).run()
# Valid Insert
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
def testMutableHashTableInvalidDefaultValue(self):
with self.test_session():
default_val = constant_op.constant([[-1, -1]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
with self.assertRaisesOpError("Default value must be a vector"):
self.assertAllEqual(0, table.size().eval())
def testMutableHashTableDuplicateInsert(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery", "brain"])
values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([3, 1, -1], result)
def testMutableHashTableFindHighRank(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1], [-1, -1]], result)
def testMutableHashTableInsertHighRank(self):
with self.test_session():
default_val = -1
keys = constant_op.constant([["brain", "salad"], ["surgery", "tank"]])
values = constant_op.constant([[0, 1], [2, 3]], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(4, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank", "tarkus"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, 3, -1], result)
def testMutableHashTableOfTensorsFindHighRank(self):
with self.test_session():
default_val = constant_op.constant([-1, -1, -1], dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([[0, 1, 2], [2, 3, 4], [4, 5, 6]],
dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(
[["brain", "salad"], ["tank", "tarkus"]])
output = table.lookup(input_string)
self.assertAllEqual([2, 2, 3], output.get_shape())
result = output.eval()
self.assertAllEqual(
[[[0, 1, 2], [2, 3, 4]], [[-1, -1, -1], [-1, -1, -1]]], result)
def testMultipleMutableHashTables(self):
with self.test_session() as sess:
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table1 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table2 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table3 = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table1.insert(keys, values).run()
table2.insert(keys, values).run()
table3.insert(keys, values).run()
self.assertAllEqual(3, table1.size().eval())
self.assertAllEqual(3, table2.size().eval())
self.assertAllEqual(3, table3.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testMutableHashTableWithTensorDefault(self):
with self.test_session():
default_val = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testSignatureMismatch(self):
with self.test_session():
default_val = -1
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableHashTable(dtypes.string, dtypes.int64,
default_val)
# insert with keys of the wrong type
with self.assertRaises(TypeError):
table.insert(constant_op.constant([4, 5, 6]), values).run()
# insert with values of the wrong type
with self.assertRaises(TypeError):
table.insert(keys, constant_op.constant(["a", "b", "c"])).run()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string_ref = variables.Variable("brain")
input_int64_ref = variables.Variable(-1, dtype=dtypes.int64)
variables.global_variables_initializer().run()
# Ref types do not produce an insert signature mismatch.
table.insert(input_string_ref, input_int64_ref).run()
self.assertAllEqual(3, table.size().eval())
# Ref types do not produce a lookup signature mismatch.
self.assertEqual(-1, table.lookup(input_string_ref).eval())
# lookup with keys of the wrong type
input_string = constant_op.constant([1, 2, 3], dtypes.int64)
with self.assertRaises(TypeError):
table.lookup(input_string).eval()
# default value of the wrong type
with self.assertRaises(TypeError):
lookup.MutableHashTable(dtypes.string, dtypes.int64, "UNK")
def testMutableHashTableStringFloat(self):
with self.test_session():
default_val = -1.5
keys = constant_op.constant(["brain", "salad", "surgery"])
values = constant_op.constant([0, 1.1, 2.2], dtypes.float32)
table = lookup.MutableHashTable(dtypes.string, dtypes.float32,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllClose([0, 1.1, default_val], result)
def testMutableHashTableIntFloat(self):
with self.test_session():
default_val = -1.0
keys = constant_op.constant([3, 7, 0], dtypes.int64)
values = constant_op.constant([7.5, -1.2, 9.9], dtypes.float32)
table = lookup.MutableHashTable(dtypes.int64, dtypes.float32,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([7, 0, 11], dtypes.int64)
output = table.lookup(input_string)
result = output.eval()
self.assertAllClose([-1.2, 9.9, default_val], result)
def testMutableHashTableInt64String(self):
with self.test_session():
default_val = "n/a"
keys = constant_op.constant([0, 1, 2], dtypes.int64)
values = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.MutableHashTable(dtypes.int64, dtypes.string,
default_val)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([0, 1, 3], dtypes.int64)
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual((b"brain", b"salad", b"n/a"), result)
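# Tests for lookup.MutableDenseHashTable: an open-addressing table keyed on a reserved empty key.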
class MutableDenseHashTableOpTest(test.TestCase):
def testBasic(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64, dtypes.int64, default_value=-1, empty_key=0)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testBasicBool(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([True, True, True], dtypes.bool)
table = lookup.MutableDenseHashTable(
dtypes.int64, dtypes.bool, default_value=False, empty_key=0)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([True, True, False], result)
def testLookupUnknownShape(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64, dtypes.int64, default_value=-1, empty_key=0)
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
placeholder_keys = array_ops.placeholder(dtypes.int64)
output = table.lookup(placeholder_keys)
self.assertAllEqual(None, output.get_shape())
result = output.eval({placeholder_keys: [11, 12, 15]})
self.assertAllEqual([0, 1, -1], result)
def testMapStringToFloat(self):
with self.test_session():
keys = constant_op.constant(["a", "b", "c"], dtypes.string)
values = constant_op.constant([0.0, 1.1, 2.2], dtypes.float32)
default_value = constant_op.constant(-1.5, dtypes.float32)
table = lookup.MutableDenseHashTable(
dtypes.string,
dtypes.float32,
default_value=default_value,
empty_key="")
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant(["a", "b", "d"], dtypes.string)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllClose([0, 1.1, -1.5], result)
def testMapInt64ToFloat(self):
for float_dtype in [dtypes.float32, dtypes.float64]:
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0.0, 1.1, 2.2], float_dtype)
default_value = constant_op.constant(-1.5, float_dtype)
table = lookup.MutableDenseHashTable(
dtypes.int64, float_dtype, default_value=default_value, empty_key=0)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllClose([0, 1.1, -1.5], result)
def testVectorValues(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([[0, 1, 2, 3], [3, 4, 5, 6], [6, 7, 8, 9]],
dtypes.int64)
default_value = constant_op.constant([-1, -2, -3, -4], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=0,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
table.insert(
constant_op.constant([14], dtypes.int64),
constant_op.constant([[2, 3, 4, 5]], dtypes.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = constant_op.constant([11, 12, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3, 4], output.get_shape())
result = output.eval()
self.assertAllEqual([[0, 1, 2, 3], [3, 4, 5, 6], [-1, -2, -3, -4]],
result)
def testVectorKeys(self):
with self.test_session():
keys = constant_op.constant([[0, 1], [1, 2], [1, 3]], dtypes.int64)
values = constant_op.constant([10, 11, 12], dtypes.int64)
empty_key = constant_op.constant([0, 3], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
table.insert(
constant_op.constant([[0, 0]], dtypes.int64),
constant_op.constant([13], dtypes.int64)).run()
self.assertAllEqual(4, table.size().eval())
self.assertAllEqual(8, len(table.export()[0].eval()))
input_string = constant_op.constant([[0, 1], [1, 2], [0, 2]],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([10, 11, -1], result)
def testResize(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
initial_num_buckets=4)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(4, len(table.export()[0].eval()))
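      # Inserting more entries than the 4 initial buckets can hold forces the table to grow.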
keys2 = constant_op.constant([13, 14, 15, 16, 17], dtypes.int64)
values2 = constant_op.constant([3, 4, 5, 6, 7], dtypes.int64)
table.insert(keys2, values2).run()
self.assertAllEqual(7, table.size().eval())
self.assertAllEqual(16, len(table.export()[0].eval()))
keys3 = constant_op.constant([10, 11, 12, 13, 14, 15, 16, 17, 18],
dtypes.int64)
output = table.lookup(keys3)
self.assertAllEqual([-1, 0, 1, 3, 4, 5, 6, 7, -1], output.eval())
def testExport(self):
with self.test_session():
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([1, 2, 3], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=100,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
exported_keys, exported_values = table.export()
self.assertAllEqual([None], exported_keys.get_shape().as_list())
self.assertAllEqual([None], exported_values.get_shape().as_list())
np_keys = exported_keys.eval()
np_values = exported_values.eval()
self.assertAllEqual(8, len(np_keys))
self.assertAllEqual(8, len(np_values))
# pair up keys and values, drop extra added dimension
pairs = np.dstack((np_keys.flatten(), np_values.flatten()))[0]
# sort by key
pairs = pairs[pairs[:, 0].argsort()]
self.assertAllEqual([[11, 1], [12, 2], [13, 3], [100, 0], [100, 0],
[100, 0], [100, 0], [100, 0]], pairs)
def testSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=ops.Graph()) as sess:
default_value = -1
empty_key = 0
keys = constant_op.constant([11, 12, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=ops.Graph()) as sess:
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([11, 14], dtypes.int64),
constant_op.constant([12, 24], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant([10, 11, 12, 13, 14], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([-1, 0, 1, 2, -1], output.eval())
def testVectorSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64)
values = constant_op.constant([[0, 1], [2, 3], [4, 5]], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
default_value = constant_op.constant([-1, -2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t1",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([[21, 22], [23, 24]], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([[0, 1], [2, 3], [-1, -2], [4, 5], [-1, -2]],
output.eval())
def testVectorScalarSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), "vector_scalar_save_restore")
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), "hash")
with self.test_session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
keys = constant_op.constant([[11, 12], [11, 14], [13, 14]], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t2",
checkpoint=True,
initial_num_buckets=32)
save = saver.Saver()
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
val = save.save(sess, save_path)
self.assertTrue(isinstance(val, six.string_types))
self.assertEqual(save_path, val)
with self.test_session(graph=ops.Graph()) as sess:
empty_key = constant_op.constant([11, 13], dtypes.int64)
default_value = constant_op.constant(-1, dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=default_value,
empty_key=empty_key,
name="t2",
checkpoint=True,
initial_num_buckets=64)
table.insert(
constant_op.constant([[11, 12], [13, 15]], dtypes.int64),
constant_op.constant([3, 4], dtypes.int64)).run()
self.assertAllEqual(2, table.size().eval())
self.assertAllEqual(64, len(table.export()[0].eval()))
save = saver.Saver()
# Restore the saved values in the parameter nodes.
save.restore(sess, save_path)
self.assertAllEqual(3, table.size().eval())
self.assertAllEqual(32, len(table.export()[0].eval()))
input_string = constant_op.constant(
[[11, 12], [11, 14], [11, 15], [13, 14], [13, 15]], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([0, 1, -1, 2, -1], output.eval())
def testReprobe(self):
with self.test_session():
# Insert 6 keys into a table with 8 buckets.
# The values are chosen to make sure collisions occur when using GCC STL
keys = constant_op.constant([11, 12, 13, 19, 20, 21], dtypes.int64)
values = constant_op.constant([51, 52, 53, 54, 55, 56], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=0,
initial_num_buckets=8)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(6, table.size().eval())
input_string = constant_op.constant([10, 11, 12, 13, 14, 19, 20, 21, 22],
dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([9], output.get_shape())
result = output.eval()
self.assertAllEqual([-1, 51, 52, 53, -1, 54, 55, 56, -1], result)
def testCustomEmptyKey(self):
with self.test_session():
keys = constant_op.constant([11, 0, 13], dtypes.int64)
values = constant_op.constant([0, 1, 2], dtypes.int64)
table = lookup.MutableDenseHashTable(
dtypes.int64, dtypes.int64, default_value=-1, empty_key=12)
self.assertAllEqual(0, table.size().eval())
table.insert(keys, values).run()
self.assertAllEqual(3, table.size().eval())
input_string = constant_op.constant([11, 0, 15], dtypes.int64)
output = table.lookup(input_string)
self.assertAllEqual([3], output.get_shape())
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testErrors(self):
with self.test_session():
table = lookup.MutableDenseHashTable(
dtypes.int64, dtypes.int64, default_value=-1, empty_key=0)
# Inserting the empty key returns an error
keys = constant_op.constant([11, 0], dtypes.int64)
values = constant_op.constant([0, 1], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
table.insert(keys, values).run()
# Looking up the empty key returns an error
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"empty_key"):
table.lookup(keys).eval()
# Arbitrary tensors of keys are not supported
keys = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
values = constant_op.constant([[11, 0], [12, 1]], dtypes.int64)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
table.lookup(keys).eval()
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Expected key shape"):
table.insert(keys, values).run()
table2 = lookup.MutableDenseHashTable(
dtypes.int64,
dtypes.int64,
default_value=-1,
empty_key=17,
initial_num_buckets=12)
with self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Number of buckets must be"):
self.assertAllEqual(0, table2.size().eval())
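# Tests for lookup.index_table_from_file: string/int-to-id tables built from a vocabulary file.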
class IndexTableFromFile(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def test_string_index_table_from_file(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_string_index_table_from_file_tensor_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.test_session():
vocabulary_file = constant_op.constant(vocabulary_file)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(1,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_string_index_table_from_file_placeholder_filename(self):
vocabulary_file = self._createVocabFile("f2i_vocab1.txt")
with self.test_session():
vocabulary_placeholder = array_ops.placeholder(dtypes.string, [])
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_placeholder, num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
feed_dict = {vocabulary_placeholder.name: vocabulary_file}
lookup_ops.tables_initializer().run(feed_dict=feed_dict)
self.assertAllEqual((1, 2, 3), ids.eval())
self.assertEqual(0,
len(ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)))
def test_int32_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab2.txt", values=("42", "1", "-1000"))
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_file(self):
vocabulary_file = self._createVocabFile(
"f2i_vocab3.txt", values=("42", "1", "-1000"))
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1,
key_dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_file_with_default_value(self):
default_value = -42
vocabulary_file = self._createVocabFile("f2i_vocab4.txt")
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_file_with_oov_buckets(self):
vocabulary_file = self._createVocabFile("f2i_vocab5.txt")
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, num_oov_buckets=1000)
ids = table.lookup(
constant_op.constant(["salad", "surgery", "tarkus", "toccata"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual(
(
1, # From vocabulary file.
2, # From vocabulary file.
              867,  # 3 + fingerprint("tarkus") mod 1000.
              860),  # 3 + fingerprint("toccata") mod 1000.
ids.eval())
def test_index_table_from_file_fails_with_empty_vocabulary_file_name(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file="")
def test_index_table_from_file_fails_with_empty_vocabulary(self):
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=None)
def test_index_table_from_file_with_vocab_size_too_small(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=2)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, -1, -1), ids.eval())
self.assertEqual(2, table.size().eval())
def test_index_table_from_file_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", table.init.run)
def test_index_table_from_file_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab8.txt")
self.assertRaises(
ValueError,
lookup.index_table_from_file,
vocabulary_file=vocabulary_file,
vocab_size=0)
with self.test_session():
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), ids.eval())
self.assertEqual(3, table.size().eval())
def test_index_table_from_file_with_invalid_hashers(self):
vocabulary_file = self._createVocabFile("invalid_hasher.txt")
with self.test_session():
with self.assertRaises(TypeError):
lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=3,
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
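# Tests for lookup.KeyValueTensorInitializer with string, int64, and int32 keys.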
class KeyValueTensorInitializerTest(test.TestCase):
def test_string(self):
with ops.Graph().as_default(), self.test_session():
init = lookup.KeyValueTensorInitializer(
("brain", "salad", "surgery"), (0, 1, 2), dtypes.string, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.init.run()
def test_int64(self):
with ops.Graph().as_default(), self.test_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
table.init.run()
def test_int32(self):
with ops.Graph().as_default(), self.test_session():
init = lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int32, dtypes.int64)
table = lookup.HashTable(init, default_value=-1)
with self.assertRaisesRegexp(
errors_impl.OpError, "No OpKernel was registered"):
table.init.run()
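# Tests for lookup.index_table_from_tensor: in-memory mappings, OOV buckets, and hasher specs.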
class IndexTableFromTensor(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_index_table_from_tensor_with_tensor_init(self):
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
if not context.executing_eagerly():
with self.assertRaises(errors_impl.OpError):
self.evaluate(table.lookup(
constant_op.constant(("salad", "surgery", "tarkus"))))
else:
# Reinitializing a table in eager should work.
table = lookup.index_table_from_tensor(
mapping=("brain", "salad", "surgery"), num_oov_buckets=1)
self.evaluate(lookup_ops.tables_initializer())
ids = table.lookup(constant_op.constant(("salad", "surgery", "tarkus")))
self.assertAllEqual((1, 2, 3), self.evaluate(ids))
def test_int32_index_table_from_tensor_with_tensor_init(self):
with self.test_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int32)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int32))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_int64_index_table_from_tensor_with_tensor_init(self):
with self.test_session():
table = lookup.index_table_from_tensor(
mapping=(42, 1, -1000), num_oov_buckets=1, dtype=dtypes.int64)
ids = table.lookup(
constant_op.constant((1, -1000, 11), dtype=dtypes.int64))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, 3), ids.eval())
def test_index_table_from_tensor_with_default_value(self):
default_value = -42
with self.test_session():
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"], default_value=default_value)
ids = table.lookup(constant_op.constant(["salad", "surgery", "tarkus"]))
self.assertRaises(errors_impl.OpError, ids.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), ids.eval())
def test_index_table_from_tensor_missing_mapping(self):
with self.test_session():
with self.assertRaisesRegexp(ValueError, "mapping must be specified"):
lookup.index_table_from_tensor(mapping=None, num_oov_buckets=1)
def test_index_table_from_tensor_empty_mapping(self):
with self.test_session():
table = lookup.index_table_from_tensor(
mapping=np.array([], dtype=np.str_), num_oov_buckets=1)
ids = table.lookup(constant_op.constant(["salad", "surgery", "brain"]))
self.assertRaises(errors_impl.OpError, ids.eval)
with self.assertRaisesRegexp(
errors_impl.OpError, "keys and values cannot be empty"):
lookup_ops.tables_initializer().run()
def test_index_table_from_tensor_with_invalid_hashers(self):
with self.test_session():
with self.assertRaises(TypeError):
lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=1)
table = lookup.index_table_from_tensor(
mapping=["brain", "salad", "surgery"],
num_oov_buckets=1,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
self.assertRaises(ValueError, table.lookup,
constant_op.constant(["salad", "surgery", "tarkus"]))
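# Tests for lookup.string_to_index, which maps strings to ids via an in-memory mapping.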
class StringToIndexTest(test.TestCase):
def test_string_to_index(self):
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, -1), indices.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = constant_op.constant(["hello", "hello"])
feats = constant_op.constant(["hello", "hola"])
_ = lookup.string_to_index(feats, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_string_to_index_with_default_value(self):
default_value = -42
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
feats = constant_op.constant(["salad", "surgery", "tarkus"])
indices = lookup.string_to_index(
feats, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, indices.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((1, 2, default_value), indices.eval())
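# Tests for lookup.index_to_string_table_from_file: id-to-string lookups from a vocabulary file.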
class IndexToStringTableFromFileTest(test.TestCase):
def _createVocabFile(self, basename):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(["brain", "salad", "surgery"]) + "\n")
return vocabulary_file
def test_index_to_string_table(self):
vocabulary_file = self._createVocabFile("i2f_vocab1.txt")
with self.test_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file)
features = table.lookup(constant_op.constant([0, 1, 2, 3], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_index_to_string_table_with_default_value(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.test_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_small(self):
default_value = b"NONE"
vocabulary_file = self._createVocabFile("f2i_vocab2.txt")
with self.test_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file,
vocab_size=2,
default_value=default_value)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", default_value, default_value),
features.eval())
def test_index_to_string_table_with_vocab_size_too_large(self):
vocabulary_file = self._createVocabFile("f2i_vocab6.txt")
with self.test_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=4)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
init = lookup_ops.tables_initializer()
self.assertRaisesRegexp(errors_impl.InvalidArgumentError,
"Invalid vocab_size", init.run)
def test_index_to_string_table_with_vocab_size(self):
vocabulary_file = self._createVocabFile("f2i_vocab7.txt")
with self.test_session():
table = lookup.index_to_string_table_from_file(
vocabulary_file=vocabulary_file, vocab_size=3)
features = table.lookup(constant_op.constant([1, 2, 4], dtypes.int64))
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", b"UNK"), features.eval())
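# Tests for lookup.index_to_string_table_from_tensor: id-to-string lookups from an in-memory mapping.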
class IndexToStringTableFromTensorTest(test.TestCase):
def test_index_to_string_table_from_tensor(self):
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
features.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = constant_op.constant(["hello", "hello"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings)
indices = constant_op.constant([0, 1, 4], dtypes.int64)
features = table.lookup(indices)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), features.eval())
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
table = lookup.index_to_string_table_from_tensor(
mapping=mapping_strings, default_value=default_value)
indices = constant_op.constant([1, 2, 4], dtypes.int64)
features = table.lookup(indices)
self.assertRaises(errors_impl.OpError, features.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value),
features.eval())
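# Tests for lookup.index_to_string, the functional id-to-string helper.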
class IndexToStringTest(test.TestCase):
def test_index_to_string(self):
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([0, 1, 2, 3], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"brain", b"salad", b"surgery", b"UNK"),
feats.eval())
def test_duplicate_entries(self):
with self.test_session():
mapping_strings = constant_op.constant(["hello", "hello"])
indices = constant_op.constant([0, 1, 4], dtypes.int64)
feats = lookup.index_to_string(indices, mapping=mapping_strings)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"hello", b"hello", b"UNK"), feats.eval())
self.assertRaises(errors_impl.OpError,
lookup_ops.tables_initializer().run)
def test_index_to_string_with_default_value(self):
default_value = b"NONE"
with self.test_session():
mapping_strings = constant_op.constant(["brain", "salad", "surgery"])
indices = constant_op.constant([1, 2, 4], dtypes.int64)
feats = lookup.index_to_string(
indices, mapping=mapping_strings, default_value=default_value)
self.assertRaises(errors_impl.OpError, feats.eval)
lookup_ops.tables_initializer().run()
self.assertAllEqual((b"salad", b"surgery", default_value), feats.eval())
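# Tests for lookup.TextFileInitializer and related file-backed table initializers.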
class InitializeTableFromFileOpTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
@test_util.run_in_graph_and_eager_modes
def testInitializeStringTable(self):
vocabulary_file = self._createVocabFile("one_column_1.txt")
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
self.evaluate(table.init)
output = table.lookup(constant_op.constant(["brain", "salad", "tank"]))
result = self.evaluate(output)
self.assertAllEqual([0, 1, -1], result)
def testInitializeInt64Table(self):
vocabulary_file = self._createVocabFile(
"one_column_int64.txt", values=("42", "1", "-1000"))
with self.test_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
table.init.run()
output = table.lookup(
constant_op.constant((42, 1, 11), dtype=dtypes.int64))
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInitializeIndexTable(self):
vocabulary_file = self._createVocabFile("one_column_2.txt")
with self.test_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.LINE_NUMBER
value_index = lookup.TextFileIndex.WHOLE_LINE
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string, value_index),
default_value)
table.init.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
output = table.lookup(input_values)
result = output.eval()
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], result)
def testMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.test_session():
default_value = -1
key_index = 1
value_index = 2
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
table.init.run()
input_string = constant_op.constant(["brain", "salad", "surgery"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([1, 5, 6], result)
def testInvalidDataTypeInMultiColumn(self):
vocabulary_file = os.path.join(self.get_temp_dir(), "three_columns.txt")
with open(vocabulary_file, "w") as f:
f.write("\n".join(["0\tbrain\t1", "1\tsalad\t5", "2\tsurgery\t6"]) + "\n")
with self.test_session():
default_value = -1
key_index = 2
value_index = 1
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("is not a valid"):
table.init.run()
def testInvalidDataType(self):
vocabulary_file = self._createVocabFile("one_column_3.txt")
with self.test_session():
default_value = "UNK"
key_index = lookup.TextFileIndex.WHOLE_LINE
value_index = lookup.TextFileIndex.LINE_NUMBER
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.int64,
key_index, dtypes.string,
value_index), default_value)
def testInvalidIndex(self):
vocabulary_file = self._createVocabFile("one_column_4.txt")
with self.test_session():
default_value = -1
key_index = 1 # second column of the line
value_index = lookup.TextFileIndex.LINE_NUMBER
table = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
key_index, dtypes.int64, value_index),
default_value)
with self.assertRaisesOpError("Invalid number of columns"):
table.init.run()
def testInitializeSameTableWithMultipleNodes(self):
vocabulary_file = self._createVocabFile("one_column_5.txt")
with self.test_session() as sess:
      shared_name = "shared-one-column"
default_value = -1
table1 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table2 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
table3 = lookup.HashTable(
lookup.TextFileInitializer(vocabulary_file, dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value,
shared_name=shared_name)
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(["brain", "salad", "tank"])
output1 = table1.lookup(input_string)
output2 = table2.lookup(input_string)
output3 = table3.lookup(input_string)
out1, out2, out3 = sess.run([output1, output2, output3])
self.assertAllEqual([0, 1, -1], out1)
self.assertAllEqual([0, 1, -1], out2)
self.assertAllEqual([0, 1, -1], out3)
def testInitializeTableWithNoFilename(self):
with self.test_session():
default_value = -1
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
"", dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testInitializeWithVocabSize(self):
with self.test_session():
default_value = -1
vocab_size = 3
vocabulary_file1 = self._createVocabFile("one_column6.txt")
table1 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file1,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Initialize from file.
table1.init.run()
      self.assertEqual(vocab_size, table1.size().eval())
vocabulary_file2 = self._createVocabFile("one_column7.txt")
vocab_size = 5
table2 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file2,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
with self.assertRaisesOpError("Invalid vocab_size"):
table2.init.run()
vocab_size = 1
vocabulary_file3 = self._createVocabFile("one_column3.txt")
table3 = lookup.HashTable(
lookup.TextFileInitializer(
vocabulary_file3,
dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER,
vocab_size=vocab_size),
default_value)
# Smaller vocab size reads only vocab_size records.
table3.init.run()
      self.assertEqual(vocab_size, table3.size().eval())
def testFeedVocabularyName(self):
vocabulary_file = self._createVocabFile("feed_vocabulary.txt")
with self.test_session():
default_value = -1
table = lookup.HashTable(
lookup.TextFileInitializer("old_file.txt", dtypes.string,
lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64,
lookup.TextFileIndex.LINE_NUMBER),
default_value)
      # Initializing with a non-existent file (old_file.txt) should fail.
# TODO(yleon): Update message, which might change per FileSystem.
with self.assertRaisesOpError("old_file.txt"):
table.init.run()
# Initialize the model feeding the vocabulary file.
filenames = ops.get_collection(ops.GraphKeys.ASSET_FILEPATHS)
table.init.run(feed_dict={filenames[0]: vocabulary_file})
input_string = constant_op.constant(["brain", "salad", "tank"])
output = table.lookup(input_string)
result = output.eval()
self.assertAllEqual([0, 1, -1], result)
def testInvalidFilenames(self):
vocabulary_file = self._createVocabFile("filename_shape.txt")
with self.test_session():
default_value = -1
# Invalid data type
other_type = constant_op.constant(1)
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
other_type, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
# Non-scalar filename
filenames = constant_op.constant([vocabulary_file, vocabulary_file])
with self.assertRaises(ValueError):
lookup.HashTable(
lookup.TextFileInitializer(
filenames, dtypes.string, lookup.TextFileIndex.WHOLE_LINE,
dtypes.int64, lookup.TextFileIndex.LINE_NUMBER),
default_value)
def testIdToStringTable(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.test_session():
default_value = "UNK"
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileStringTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.init.run()
input_values = constant_op.constant([0, 1, 2, 3], dtypes.int64)
out = table.lookup(input_values)
self.assertAllEqual([b"brain", b"salad", b"surgery", b"UNK"], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testStringToIdTable(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt")
with self.test_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table.init.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, -1], out.eval())
self.assertEquals(vocab_size, table.size().eval())
def testInt64ToIdTable(self):
vocab_file = self._createVocabFile(
"feat_to_id_3.txt", values=("42", "1", "-1000"))
with self.test_session():
default_value = -1
vocab_size = 3
table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value)
table.init.run()
out = table.lookup(
constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64))
self.assertAllEqual((0, 1, 2, -1), out.eval())
self.assertEquals(vocab_size, table.size().eval())
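# Editorial note (not part of the original test file): the tests above all follow the
# same pattern for a vocabulary-file-backed table; a rough sketch, with "vocab.txt"
# assumed purely for illustration:
#
#   table = lookup.HashTable(
#       lookup.TextFileIdTableInitializer("vocab.txt", vocab_size=3),
#       default_value=-1)
#   table.init.run()  # or lookup_ops.tables_initializer().run()
#   ids = table.lookup(constant_op.constant(["brain", "salad", "UNK"]))
#   # ids evaluates to [0, 1, -1] if "brain" and "salad" are the first two lines.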
class IdTableWithHashBucketsTest(test.TestCase):
def _createVocabFile(self, basename, values=("brain", "salad", "surgery")):
vocabulary_file = os.path.join(self.get_temp_dir(), basename)
with open(vocabulary_file, "w") as f:
f.write("\n".join(values) + "\n")
return vocabulary_file
def testStringIdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_1.txt")
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value),
oov_buckets)
table.init.run()
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
out = table.lookup(input_string)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt32IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_2.txt", ("42", "1", "-1000"))
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets,
key_dtype=dtypes.int32)
table.init.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int32)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testInt64IdTableWithHashBuckets(self):
vocab_file = self._createVocabFile("feat_to_id_3.txt", ("42", "1", "-1000"))
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size, key_dtype=dtypes.int64),
default_value),
oov_buckets)
table.init.run()
values = constant_op.constant((42, 1, -1000, 11), dtype=dtypes.int64)
out = table.lookup(values)
self.assertAllEqual([0, 1, 2, 3], out.eval())
self.assertEquals(vocab_size + oov_buckets, table.size().eval())
def testStringIdTableWithOnlyHashBucket(self):
with self.test_session():
oov_buckets = 5
# Set a table that only uses hash buckets, for each input value returns
# an id calculated by fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(None, oov_buckets)
table.init.run()
values = constant_op.constant(("brain", "salad", "surgery"))
out = table.lookup(values)
self.assertAllEqual(
[
3, # fingerprint("brain") mod 5.
1, # fingerprint("salad") mod 5.
4 # fingerprint("surgery") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testInt32IdTableWithOnlyHashBucket(self):
with self.test_session():
oov_buckets = 5
# Set a table that only uses hash buckets, for each input value returns
# an id calculated by fingerprint("input") mod oov_buckets.
table = lookup.IdTableWithHashBuckets(
None, oov_buckets, key_dtype=dtypes.int32)
table.init.run()
input_string = constant_op.constant([42, 1, -1000], dtype=dtypes.int32)
out = table.lookup(input_string)
self.assertAllEqual(
[
1, # fingerprint("42") mod 5.
4, # fingerprint("1") mod 5.
2 # fingerprint("-1000") mod 5
],
out.eval())
self.assertEquals(oov_buckets, table.size().eval())
def testFloat64IdTableWithOnlyHashBucket(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.float64)
def testBoolIdTableWithOnlyHashBucket(self):
with self.test_session():
with self.assertRaisesRegexp(TypeError, "Invalid key_dtype"):
lookup.IdTableWithHashBuckets(
None, num_oov_buckets=5, key_dtype=dtypes.bool)
def testIdTableWithHashBucketsWithMultipleInitializers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.test_session() as sess:
default_value = -1
vocab_size = 3
oov_buckets = 3
vocab_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
table1 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.FastHashSpec,
name="table1")
table2 = lookup.IdTableWithHashBuckets(
vocab_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec((1, 2)),
name="table2")
lookup_ops.tables_initializer().run()
input_string = constant_op.constant(
["fruit", "brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string)
out2 = table2.lookup(input_string)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([5, 0, 1, 2, 5], out1)
self.assertAllEqual([5, 0, 1, 2, 3], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
test_util.assert_ops_in_graph({
"table1_Lookup/hash_bucket": "StringToHashBucketFast",
"table2_Lookup/hash_bucket": "StringToHashBucketStrong",
}, sess.graph)
def testIdTableWithHashBucketsInitializationAcrossSessions(self):
vocab_file = self._createVocabFile("feat_to_id_5.txt")
shared_name = "across-sessions"
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
table1.init.run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
out1 = table1.lookup(input_string_1)
self.assertAllEqual([0, 1, 2, 3], out1.eval())
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
# Underlying lookup table already initialized in previous session.
# No need to call table2.init.run()
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value,
shared_name=shared_name),
oov_buckets)
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out2 = table2.lookup(input_string_2)
self.assertAllEqual([3, 1, 3], out2.eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testIdTableWithHashBucketsWithMultipleInitializersDifferentDefault(self):
vocab_file = self._createVocabFile("feat_to_id_6.txt")
with self.test_session() as sess:
default_value1 = -1
vocab_size = 3
oov_buckets = 0
table1 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value1),
oov_buckets)
default_value2 = -2
table2 = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value2),
oov_buckets)
lookup_ops.tables_initializer().run()
input_string_1 = constant_op.constant(
["brain", "salad", "surgery", "UNK"])
input_string_2 = constant_op.constant(["fruit", "salad", "UNK"])
out1 = table1.lookup(input_string_1)
out2 = table2.lookup(input_string_2)
out1, out2 = sess.run([out1, out2])
self.assertAllEqual([0, 1, 2, -1], out1)
self.assertAllEqual([-2, 1, -2], out2)
self.assertEquals(vocab_size + oov_buckets, table1.size().eval())
self.assertEquals(vocab_size + oov_buckets, table2.size().eval())
def testSparseTensor(self):
vocab_file = self._createVocabFile("feat_to_id_7.txt")
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.test_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant(["brain", "salad", "brain", "surgery", "tarkus"],
dtypes.string),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=3),
-1),
1)
table.init.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt32SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.test_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int32),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int32)
table.init.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testInt64SparseTensor(self):
input_indices = [[0, 0], [0, 1], [2, 0], [2, 2], [3, 0]]
input_shape = [4, 4]
with self.test_session() as sess:
sp_features = sparse_tensor.SparseTensor(
constant_op.constant(input_indices, dtypes.int64),
constant_op.constant([42, 1, 42, -1000, 11], dtypes.int64),
constant_op.constant(input_shape, dtypes.int64))
table = lookup.IdTableWithHashBuckets(
lookup.HashTable(
lookup.KeyValueTensorInitializer(
(42, 1, -1000), (0, 1, 2), dtypes.int64, dtypes.int64),
-1),
1,
key_dtype=dtypes.int64)
table.init.run()
sp_ids = table.lookup(sp_features)
self.assertAllEqual([5], sp_ids.values._shape_as_list())
sp_ids_ind, sp_ids_val, sp_ids_shape = sess.run(
[sp_ids.indices, sp_ids.values, sp_ids.dense_shape])
self.assertAllEqual(input_indices, sp_ids_ind)
self.assertAllEqual([0, 1, 0, 2, 3], sp_ids_val)
self.assertAllEqual(input_shape, sp_ids_shape)
def testIdTableWithHashBucketsWithInvalidHashers(self):
vocab_file = self._createVocabFile("feat_to_id_4.txt")
with self.test_session():
default_value = -1
vocab_size = 3
oov_buckets = 1
lookup_table = lookup.HashTable(
lookup.TextFileIdTableInitializer(
vocab_file, vocab_size=vocab_size),
default_value)
with self.assertRaises(TypeError):
lookup.IdTableWithHashBuckets(
lookup_table, oov_buckets, hasher_spec=1)
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.HasherSpec("my-awesome-hash", None))
input_string = constant_op.constant(["brain", "salad", "surgery", "UNK"])
with self.assertRaises(ValueError):
table.lookup(input_string)
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([]))
with self.assertRaises(ValueError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([1, 2, 3]))
with self.assertRaises(TypeError):
table = lookup.IdTableWithHashBuckets(
lookup_table,
oov_buckets,
hasher_spec=lookup.StrongHashSpec([None, 2]))
if __name__ == "__main__":
test.main()
|
apache-2.0
|
matthaywardwebdesign/rethinkdb
|
external/gtest_1.6.0/test/gtest_throw_on_failure_test.py
|
2917
|
5766
|
#!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests Google Test's throw-on-failure mode with exceptions disabled.
This script invokes gtest_throw_on_failure_test_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import gtest_test_utils
# Constants.
# The command line flag for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE = 'gtest_throw_on_failure'
# Path to the gtest_throw_on_failure_test_ program, compiled with
# exceptions disabled.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_throw_on_failure_test_')
# Utilities.
def SetEnvVar(env_var, value):
"""Sets an environment variable to a given value; unsets it when the
given value is None.
"""
env_var = env_var.upper()
if value is not None:
os.environ[env_var] = value
elif env_var in os.environ:
del os.environ[env_var]
def Run(command):
"""Runs a command; returns True/False if its exit code is/isn't 0."""
print 'Running "%s". . .' % ' '.join(command)
p = gtest_test_utils.Subprocess(command)
return p.exited and p.exit_code == 0
# The tests. TODO([email protected]): refactor the class to share common
# logic with code in gtest_break_on_failure_unittest.py.
class ThrowOnFailureTest(gtest_test_utils.TestCase):
"""Tests the throw-on-failure mode."""
def RunAndVerify(self, env_var_value, flag_value, should_fail):
"""Runs gtest_throw_on_failure_test_ and verifies that it does
(or does not) exit with a non-zero code.
Args:
env_var_value: value of the GTEST_THROW_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_throw_on_failure flag;
None if the flag should not be present.
should_fail: True iff the program is expected to fail.
"""
SetEnvVar(THROW_ON_FAILURE, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % THROW_ON_FAILURE
else:
flag = '--%s' % THROW_ON_FAILURE
command = [EXE_PATH]
if flag:
command.append(flag)
if should_fail:
should_or_not = 'should'
else:
should_or_not = 'should not'
failed = not Run(command)
SetEnvVar(THROW_ON_FAILURE, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero '
'exit code.' %
(THROW_ON_FAILURE, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(failed == should_fail, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False)
def testThrowOnFailureEnvVar(self):
"""Tests using the GTEST_THROW_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value=None,
should_fail=True)
def testThrowOnFailureFlag(self):
"""Tests using the --gtest_throw_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value=None,
flag_value='1',
should_fail=True)
def testThrowOnFailureFlagOverridesEnvVar(self):
"""Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='0',
flag_value='1',
should_fail=True)
self.RunAndVerify(env_var_value='1',
flag_value='0',
should_fail=False)
self.RunAndVerify(env_var_value='1',
flag_value='1',
should_fail=True)
if __name__ == '__main__':
gtest_test_utils.Main()
|
agpl-3.0
|
jbuchbinder/youtube-dl
|
youtube_dl/extractor/democracynow.py
|
22
|
3016
|
# coding: utf-8
from __future__ import unicode_literals
import re
import os.path
from .common import InfoExtractor
from ..compat import compat_urlparse
from ..utils import (
url_basename,
remove_start,
)
class DemocracynowIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?democracynow\.org/(?P<id>[^\?]*)'
IE_NAME = 'democracynow'
_TESTS = [{
'url': 'http://www.democracynow.org/shows/2015/7/3',
'md5': '3757c182d3d84da68f5c8f506c18c196',
'info_dict': {
'id': '2015-0703-001',
'ext': 'mp4',
'title': 'Daily Show',
},
}, {
'url': 'http://www.democracynow.org/2015/7/3/this_flag_comes_down_today_bree',
'info_dict': {
'id': '2015-0703-001',
'ext': 'mp4',
'title': '"This Flag Comes Down Today": Bree Newsome Scales SC Capitol Flagpole, Takes Down Confederate Flag',
'description': 'md5:4d2bc4f0d29f5553c2210a4bc7761a21',
},
'params': {
'skip_download': True,
},
}]
def _real_extract(self, url):
display_id = self._match_id(url)
webpage = self._download_webpage(url, display_id)
json_data = self._parse_json(self._search_regex(
r'<script[^>]+type="text/json"[^>]*>\s*({[^>]+})', webpage, 'json'),
display_id)
title = json_data['title']
formats = []
video_id = None
for key in ('file', 'audio', 'video', 'high_res_video'):
media_url = json_data.get(key, '')
if not media_url:
continue
media_url = re.sub(r'\?.*', '', compat_urlparse.urljoin(url, media_url))
video_id = video_id or remove_start(os.path.splitext(url_basename(media_url))[0], 'dn')
formats.append({
'url': media_url,
'vcodec': 'none' if key == 'audio' else None,
})
self._sort_formats(formats)
default_lang = 'en'
subtitles = {}
def add_subtitle_item(lang, info_dict):
if lang not in subtitles:
subtitles[lang] = []
subtitles[lang].append(info_dict)
# chapter_file entries are not subtitles
if 'caption_file' in json_data:
add_subtitle_item(default_lang, {
'url': compat_urlparse.urljoin(url, json_data['caption_file']),
})
for subtitle_item in json_data.get('captions', []):
lang = subtitle_item.get('language', '').lower() or default_lang
add_subtitle_item(lang, {
'url': compat_urlparse.urljoin(url, subtitle_item['url']),
})
description = self._og_search_description(webpage, default=None)
return {
'id': video_id or display_id,
'title': title,
'description': description,
'thumbnail': json_data.get('image'),
'subtitles': subtitles,
'formats': formats,
}
|
unlicense
|
dscotese/bitcoin
|
test/functional/test_framework/segwit_addr.py
|
18
|
4177
|
#!/usr/bin/env python3
# Copyright (c) 2017 Pieter Wuille
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Reference implementation for Bech32 and segwit addresses."""
import unittest
CHARSET = "qpzry9x8gf2tvdw0s3jn54khce6mua7l"
def bech32_polymod(values):
"""Internal function that computes the Bech32 checksum."""
generator = [0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3]
chk = 1
for value in values:
top = chk >> 25
chk = (chk & 0x1ffffff) << 5 ^ value
for i in range(5):
chk ^= generator[i] if ((top >> i) & 1) else 0
return chk
def bech32_hrp_expand(hrp):
"""Expand the HRP into values for checksum computation."""
return [ord(x) >> 5 for x in hrp] + [0] + [ord(x) & 31 for x in hrp]
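# Editorial worked example (comment only): bech32_hrp_expand('bc') -> [3, 3, 0, 2, 3],
# since ord('b') >> 5 == 3, ord('c') >> 5 == 3, ord('b') & 31 == 2 and ord('c') & 31 == 3.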
def bech32_verify_checksum(hrp, data):
"""Verify a checksum given HRP and converted data characters."""
return bech32_polymod(bech32_hrp_expand(hrp) + data) == 1
def bech32_create_checksum(hrp, data):
"""Compute the checksum values given HRP and data."""
values = bech32_hrp_expand(hrp) + data
polymod = bech32_polymod(values + [0, 0, 0, 0, 0, 0]) ^ 1
return [(polymod >> 5 * (5 - i)) & 31 for i in range(6)]
def bech32_encode(hrp, data):
"""Compute a Bech32 string given HRP and data values."""
combined = data + bech32_create_checksum(hrp, data)
return hrp + '1' + ''.join([CHARSET[d] for d in combined])
def bech32_decode(bech):
"""Validate a Bech32 string, and determine HRP and data."""
if ((any(ord(x) < 33 or ord(x) > 126 for x in bech)) or
(bech.lower() != bech and bech.upper() != bech)):
return (None, None)
bech = bech.lower()
pos = bech.rfind('1')
if pos < 1 or pos + 7 > len(bech) or len(bech) > 90:
return (None, None)
if not all(x in CHARSET for x in bech[pos+1:]):
return (None, None)
hrp = bech[:pos]
data = [CHARSET.find(x) for x in bech[pos+1:]]
if not bech32_verify_checksum(hrp, data):
return (None, None)
return (hrp, data[:-6])
def convertbits(data, frombits, tobits, pad=True):
"""General power-of-2 base conversion."""
acc = 0
bits = 0
ret = []
maxv = (1 << tobits) - 1
max_acc = (1 << (frombits + tobits - 1)) - 1
for value in data:
if value < 0 or (value >> frombits):
return None
acc = ((acc << frombits) | value) & max_acc
bits += frombits
while bits >= tobits:
bits -= tobits
ret.append((acc >> bits) & maxv)
if pad:
if bits:
ret.append((acc << (tobits - bits)) & maxv)
elif bits >= frombits or ((acc << (tobits - bits)) & maxv):
return None
return ret
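# Editorial worked example (comment only): convertbits([255], 8, 5) -> [31, 28].
# 0b11111111 regrouped into 5-bit digits gives 0b11111 (31), and the 3 leftover bits
# 0b111 are right-padded with zeros to 0b11100 (28) because pad=True.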
def decode_segwit_address(hrp, addr):
"""Decode a segwit address."""
hrpgot, data = bech32_decode(addr)
if hrpgot != hrp:
return (None, None)
decoded = convertbits(data[1:], 5, 8, False)
if decoded is None or len(decoded) < 2 or len(decoded) > 40:
return (None, None)
if data[0] > 16:
return (None, None)
if data[0] == 0 and len(decoded) != 20 and len(decoded) != 32:
return (None, None)
return (data[0], decoded)
def encode_segwit_address(hrp, witver, witprog):
"""Encode a segwit address."""
ret = bech32_encode(hrp, [witver] + convertbits(witprog, 8, 5))
if decode_segwit_address(hrp, ret) == (None, None):
return None
return ret
class TestFrameworkScript(unittest.TestCase):
def test_segwit_encode_decode(self):
def test_python_bech32(addr):
hrp = addr[:4]
self.assertEqual(hrp, "bcrt")
(witver, witprog) = decode_segwit_address(hrp, addr)
self.assertEqual(encode_segwit_address(hrp, witver, witprog), addr)
# P2WPKH
test_python_bech32('bcrt1qthmht0k2qnh3wy7336z05lu2km7emzfpm3wg46')
# P2WSH
test_python_bech32('bcrt1qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq3xueyj')
test_python_bech32('bcrt1qft5p2uhsdcdc3l2ua4ap5qqfg4pjaqlp250x7us7a8qqhrxrxfsqseac85')
|
mit
|
rew4332/tensorflow
|
tensorflow/python/kernel_tests/summary_tensor_op_test.py
|
9
|
3807
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for summary ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
import tensorflow as tf
from tensorflow.python.framework import tensor_util
class SummaryOpsTest(tf.test.TestCase):
def _SummarySingleValue(self, s):
summ = tf.Summary()
summ.ParseFromString(s)
self.assertEqual(len(summ.value), 1)
return summ.value[0]
def _AssertNumpyEq(self, actual, expected):
self.assertTrue(np.array_equal(actual, expected))
def testNodeNames(self):
with self.test_session() as sess:
c = tf.constant(1)
s1 = tf.summary.tensor_summary("", c, name="s1")
with tf.name_scope("foo"):
s2 = tf.summary.tensor_summary("", c, name="s2")
with tf.name_scope("zod"):
s3 = tf.summary.tensor_summary("", c, name="s3")
s4 = tf.summary.tensor_summary("", c)
summ1, summ2, summ3, summ4 = sess.run([s1, s2, s3, s4])
v1 = self._SummarySingleValue(summ1)
self.assertEqual(v1.node_name, "s1")
v2 = self._SummarySingleValue(summ2)
self.assertEqual(v2.node_name, "foo/s2")
v3 = self._SummarySingleValue(summ3)
self.assertEqual(v3.node_name, "foo/zod/s3")
v4 = self._SummarySingleValue(summ4)
self.assertEqual(v4.node_name, "foo/zod/TensorSummary")
def testScalarSummary(self):
with self.test_session() as sess:
const = tf.constant(10.0)
summ = tf.summary.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, 10)
def testStringSummary(self):
s = six.b("foobar")
with self.test_session() as sess:
const = tf.constant(s)
summ = tf.summary.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, s)
def testManyScalarSummary(self):
with self.test_session() as sess:
const = tf.ones([5, 5, 5])
summ = tf.summary.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, np.ones([5, 5, 5]))
def testManyStringSummary(self):
strings = [[six.b("foo bar"), six.b("baz")], [six.b("zoink"), six.b("zod")]]
with self.test_session() as sess:
const = tf.constant(strings)
summ = tf.summary.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, strings)
def testManyBools(self):
bools = [True, True, True, False, False, False]
with self.test_session() as sess:
const = tf.constant(bools)
summ = tf.summary.tensor_summary("foo", const)
result = sess.run(summ)
value = self._SummarySingleValue(result)
n = tensor_util.MakeNdarray(value.tensor)
self._AssertNumpyEq(n, bools)
if __name__ == "__main__":
tf.test.main()
|
apache-2.0
|
kiniou/qtile
|
libqtile/widget/textbox.py
|
2
|
2262
|
# Copyright (c) 2008, 2010 Aldo Cortesi
# Copyright (c) 2011 Mounier Florian
# Copyright (c) 2012, 2015 Tycho Andersen
# Copyright (c) 2013 Tao Sauvage
# Copyright (c) 2013 Craig Barnes
# Copyright (c) 2014 Sean Vig
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .. import bar
from . import base
class TextBox(base._TextBox):
"""
A flexible textbox that can be updated from bound keys, scripts and
qsh.
"""
defaults = [
("font", "Arial", "Text font"),
("fontsize", None, "Font pixel size. Calculated if None."),
("fontshadow", None, "font shadow color, default is None(no shadow)"),
("padding", None, "Padding left and right. Calculated if None."),
("foreground", "#ffffff", "Foreground colour."),
]
def __init__(self, text=" ", width=bar.CALCULATED, **config):
base._TextBox.__init__(self, text=text, width=width, **config)
def update(self, text):
self.text = text
self.bar.draw()
def cmd_update(self, text):
"""
Update the text in a TextBox widget.
"""
self.update(text)
def cmd_get(self):
"""
Retrieve the text in a TextBox widget.
"""
return self.text
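# Rough usage sketch (editorial addition, not part of the original widget); the bar
# size and the text are assumptions for illustration only:
#
#   from libqtile import bar, widget
#   my_textbox = widget.TextBox("hello")
#   my_bar = bar.Bar([my_textbox], 24)
#   # my_textbox.update("new text") later redraws the bar with the new text.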
|
mit
|
wxgeo/geophar
|
wxgeometrie/pylib/fonctions.py
|
1
|
29422
|
# -*- coding: utf-8 -*-
##########################################################################
#
# Fonctions couramment utilisees, et non implementees en Python
# (...du moins, à ma connaissance !)
#
##########################################################################
# WxGeometrie
# Dynamic geometry, graph plotter, and more for french mathematic teachers.
# Copyright (C) 2005-2013 Nicolas Pourcelot
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import re
import weakref
import sys, zlib
import os.path
import warnings, traceback, linecache
from .decorator import decorator
from .. import param # paramètres du programme
from sympy import sympify
def is_in(element, _list):
"""Teste si l'élement est dans la liste, en effectuant un test d'identité (is) et non d'égalité (==)."""
for elt in _list:
if elt is element:
return True
return False
# This is 'a lot' slower (2.4 times about) :
##def isin2(element, _list):
## u"""Teste si l'élement est dans la liste, en effectuant un test d'identité (is) et non d'égalité (==)."""
## return id(element) in (id(elt) for elt in _list)
# And this too... (2 times about on python 2.5)
##def isin3(element, _list):
## u"""Teste si l'élement est dans la liste, en effectuant un test d'identité (is) et non d'égalité (==)."""
## return any(elt is element for elt in _list)
def mreplace(main_string, list_of_strings, new_string = ""):
"""Remplace, dans "main_string", toutes les sous-chaines de "list_of_strings", par la chaine "new_string"."""
for old_string in list_of_strings:
main_string = main_string.replace(old_string, new_string)
return main_string
def recursive_replace(main_string, old_string, new_string = "", max_loops = 10000, max_len = 1000000):
"""Remplace, dans "main_string", la sous-chaîne "old_string" par "new_string", au besoin en plusieurs passes.
En fin de processus, la sous-chaîne old_string ne subsiste plus dans la chaîne.
Renvoie une erreur si le processus ne semble pas converger.
(C'est en particulier le cas si old_string est strictement inclus dans new_string)
La différence avec replace est claire sur cette exemple :
>>> from wxgeometrie.pylib.fonctions import recursive_replace
>>> "Hi HelloHello world world !".replace("Hello world", "")
'Hi Hello world !'
>>> recursive_replace("Hi HelloHello world world !", "Hello world", "")
'Hi !'
Il y a un cas particulier ou la sous-chaîne reste présente :
>>> recursive_replace("Hi HelloHello world world !", "Hello world", "Hello world")
'Hi HelloHello world world !'
"""
chaine = ""
loops = 0
while chaine != main_string:
loops += 1
chaine = main_string
main_string = main_string.replace(old_string, new_string)
if loops > max_loops:
raise RuntimeError("Nombre de passes superieur au maximum autorise.")
if len(main_string) > max_len:
raise OverflowError("Taille de la chaine superieure au maximum autorise.")
return main_string
def recursive_mreplace(main_string, list_of_strings, new_string = "", max_loops = 10000, max_len = 1000000):
"""Remplace, dans "main_string", toutes les sous-chaines de "list_of_strings" par "new_string", au besoin en plusieurs passes.
En fin de processus, la sous-chaîne old_string ne subsiste plus dans la chaîne.
Renvoie une erreur si le processus ne semble pas converger.
Voir également recursive_replace() et mreplace().
Remarque: recursive_mreplace n'est pas équivalent des appels successifs de recursive_replace().
>>> from wxgeometrie.pylib.fonctions import recursive_replace, recursive_mreplace
>>> s = "tbtbaoao"
>>> s = recursive_mreplace(s, ("to", "ba"))
>>> s
''
>>> s="tbtbaoao"
>>> for i in ("to", "ba"):
... s = recursive_replace(s, i)
>>> s
'tbtoao'
"""
chaine = ""
loops = 0
while chaine != main_string:
loops += 1
chaine = main_string
main_string = mreplace(main_string, list_of_strings, new_string)
if loops > max_loops:
raise RuntimeError("Nombre de passes superieur au maximum autorise.")
if len(main_string) > max_len:
raise OverflowError("Taille de la chaine superieure au maximum autorise.")
return main_string
def mfind(chaine, car):
n = 0
l = []
while True:
n = chaine.find(car, n)
if n == -1: break
l.append(n)
n += 1
return l
def msplit(main_string, list_of_separators):
"""Découpe la chaine "main_string", selon les séparateurs définis dans "list_of_separators"."""
return mreplace(main_string, list_of_separators[1:], list_of_separators[0]).split(list_of_separators[0])
def removeend(main_string, *substrings):
"Enlève les éventuelles occurences de substring en fin de chaine."
if substrings and '' not in substrings: # pour éviter une éventuelle boucle infinie.
run = True
while run:
run = False
for sub in substrings:
if main_string.endswith(sub):
main_string = main_string[:-len(sub)]
run = True
return main_string
def removestart(main_string, *substrings):
"Enlève les éventuelles occurences de substring en début de chaine."
if substrings and '' not in substrings: # pour éviter une éventuelle boucle infinie.
run = True
while run:
run = False
for sub in substrings:
if main_string.startswith(sub):
main_string = main_string[len(sub):]
run = True
return main_string
def no_twin(liste):
"""Elimine les doublons dans une liste.
Si tous les élements de la liste sont 'hashables', mieux vaut utiliser la fonction set."""
dico = {}
for elt in liste:
dico[id(elt)] = elt
return list(dico.values())
#def ntwin(l): return dict((id(elt), elt) for elt in l).values() # plus élégant, mais 50% plus lent ?!?
def advanced_split(main_string, separator, keep_empty_str = False, symbols = "([{}])"):
"""Découpe la chaine "main_string" de manière intelligente,
en ignorant les séparateurs compris dans un groupe entre parenthèses, crochets, accolades, guillemets.
Attention, separateur ne peut donc pas être une parenthèse, un crochet, une accolade ou un guillemet !
Par défaut, supprime également les chaines vides."""
in_string = False # est-on dans une chaine ?
in_string_sep = "'" # caractere encadrant la chaine (" ou ')
parentheses = 0 # tient le compte des parentheses ouvertes non fermees
crochets = 0 # idem pour les crochets
accolades = 0 # idem
coupures = [-1] # endroits ou il faudra couper la chaine
for i in range(len(main_string)):
a = main_string[i]
if a in ("'", '"'):
if in_string:
if in_string_sep == a: # attention, il y a 2 indicateurs de chaine (" et ')
in_string = False
else:
in_string = True
in_string_sep = a
elif a in symbols:
if a == "(" and not in_string:
parentheses += 1
elif a == ")" and not in_string:
parentheses -= 1
elif a == "[" and not in_string:
crochets += 1
elif a == "]" and not in_string:
crochets -= 1
elif a == "{" and not in_string:
accolades += 1
elif a == "}" and not in_string:
accolades -= 1
elif a == separator and not (in_string or parentheses or crochets or accolades) :
coupures.append(i)
coupures.append(None)
# chaine[i:None] retourne la fin de chaine
return [main_string[i+1:j] for i, j in zip(coupures[:-1], coupures[1:]) if main_string[i+1:j] or keep_empty_str]
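# Illustrative example (editorial addition):
# advanced_split("a,(b,c),d", ",") -> ['a', '(b,c)', 'd']
# (the comma inside the parentheses is not treated as a separator).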
def regsub(regular_exp, main_string, action = ""):
"""Transforme la chaine "main_string" :
Il applique aux parties vérifiant "regular_exp" le traitement "action".
>>> from wxgeometrie.pylib.fonctions import regsub
>>> regsub("[a-z]", "salut les amis !", "?")
'????? ??? ???? !'
>>> regsub("[a-z]+", "hello world !", lambda s: s[1:])
'ello orld !'
"""
if isinstance(action, str):
return re.sub(regular_exp, action, main_string)
else:
return re.sub(regular_exp, lambda x: action(x.group(0)), main_string)
class WeakList(weakref.WeakValueDictionary):
"""Une 'liste' de réferences faibles.
Le terme 'liste' est trompeur, la syntaxe des listes de python n'est pas implémentée,
exceptée les méthodes append(), et remove(), et la conversion en liste.
En outre, le contenu s'obtient par la méthode values().
Note:
L'implémentation de remove est un peu différente :
'remove' utilise le comparateur 'is', et non "==", ce qui fait que remove([]) ne fera jamais rien, par exemple.
(Il faut qu'il s'agisse du meme objet, et non d'un objet égal).
Sinon, il faut utiliser compare_and_remove."""
def __init__(self):
weakref.WeakValueDictionary.__init__(self)
def append(self, valeur):
u"Ajoute une valeur en fin de liste."
self[max(self.keys(), default=0) + 1] = valeur
def remove(self, valeur):
"""Supprime la valeur de la liste.
Un test d'identité (et non d'égalité) est effectué ('is' et non '==').
Si la valeur est présente plusieurs fois, elle n'est supprimée qu'une seule fois.
Si la valeur n'est pas présente, une erreur de type ValueError est émise.
"""
for key, value in self.items():
if value is valeur:
del self[key] # il faut qu'il s'agisse du même objet
return
raise ValueError(repr(valeur) + " is not in WeakList")
def compare_and_remove(self, valeur):
"""Supprime la valeur de la liste.
Un test d'égalité est effectué ('==' et non 'is').
Si la valeur est présente plusieurs fois, elle n'est supprimée qu'une seule fois.
Si la valeur n'est pas présente, une erreur de type ValueError est émise.
"""
for key, value in self.items():
if value == valeur:
del self[key] # un objet égal suffit
return
raise ValueError(repr(valeur) + " not in WeakList")
def remove_all(self, valeur):
"""Supprime la valeur de la liste.
Un test d'identité (et non d'égalité) est effectué ('is' et non '==').
Toutes les occurences de la valeur sont supprimées.
Si la valeur n'est pas présente, aucune erreur n'est émise."""
for key, value in self.items():
if value is valeur: del self[key] # il faut qu'il s'agisse du même objet
def compare_and_remove_all(self, valeur):
"""Supprime la valeur de la liste.
Un test d'égalité est effectué ('==' et non 'is').
Toutes les occurences de la valeur sont supprimées.
Si la valeur n'est pas présente, aucune erreur n'est émise."""
for key, value in self.items():
if value == valeur: del self[key] # un objet égal suffit
def __str__(self):
return str(list(self.values())) + " (WeakList)"
def __iter__(self):
return iter(self.values())
def __getitem__(self, n):
return list(self.values())[n]
def __contains__(self, item):
return item in self.values()
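# Rough usage sketch (editorial addition); the Dummy class exists only for illustration:
#
#   class Dummy(object): pass
#   obj = Dummy()
#   wl = WeakList()
#   wl.append(obj)
#   list(wl)      # -> [<Dummy object ...>]
#   del obj       # once the object is garbage-collected, the entry disappears
#   list(wl)      # -> [] (after collection)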
class WeakMultiSet(weakref.WeakKeyDictionary):
"""A WeakValueDictionary which keeps count of how many times an object was added.
The interface implements only the methods remove() and add()
to emulate a set.
When an element is removed, the count is actually decreased.
If the count reaches 0, the element is discarded (the key is removed).
Additionally, the method remove_completely() discards the element
whatever the count.
This is quite similar to collections.Counter, except that entries are deleted
when the count reaches 0, and weak references are used."""
def remove(self, elt):
self[elt] -= 1
if self[elt] == 0:
del self[elt]
def add(self, elt):
if elt in self:
self[elt] += 1
else:
self[elt] = 1
def remove_completely(self, elt):
del self[elt]
def extract_error(chaine=''):
lignes = []
if chaine:
lignes.append(chaine)
typ, val, tb = sys.exc_info()
tb = traceback.extract_tb(tb)
lignes.append('Traceback (most recent call last)')
for fichier, ligne, fonction, code in tb:
lignes.append(' File "%s", line %s, in %s'
% (str(fichier), str(ligne), str(fonction)))
if code is not None:
lignes.append(' ' + str(code))
lignes.append(typ.__name__ + ": " + str(val))
lignes.append("Warning: this error was not raised.")
return '\n'.join(lignes)
def print_error(chaine=''):
"""Affiche l'erreur sans interrompre le programme.
C'est un alias de sys.excepthook, mais qui est plus souple avec les encodages.
"""
print(extract_error(chaine))
def rstrip_(s, end):
"""Supprime récursivement 'end' de la fin de la chaîne 's'.
>>> from wxgeometrie.pylib.fonctions import rstrip_
>>> rstrip_('blabla_suffixe_fixe_suffixe_suffixe', '_suffixe')
'blabla_suffixe_fixe'
Nota :
* ne pas confondre avec str.rstrip() :
>>> 'blabla_suffixe_fixe_suffixe_suffixe'.rstrip('_suffixe')
'blabla'
* si end == '', la chaîne de départ est retournée :
>>> rstrip_('bonjour', '')
'bonjour'
"""
if not end:
return s
i = -len(end)
while s.endswith(end):
s = s[:i]
return s
# http://stackoverflow.com/questions/2556108/rreplace-how-to-replace-the-last-occurence-of-an-expression-in-a-string
def rreplace(s, old, new, count):
"""rreplace (s, old, new, count) -> string
Return a copy of string S with the first count occurrences of substring
old replaced by new, starting from right to left."""
return new.join(s.rsplit(old, count))
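# Illustrative example (editorial addition):
# rreplace('one.two.three', '.', '-', 1) -> 'one.two-three'
# (only the right-most occurrence is replaced, unlike str.replace which scans left to right).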
def split_geoname(name):
"""Tente de décomposer un nom d'objet géométrique en plusieurs noms.
Ex:
1) "AB" -> ("A","B")
2) "A12B" -> ("A12","B")
3) "AB1" -> ("A","B1")
4) "A'B\"" -> ("A'", "B\"")
5) "ABC" -> ("A", "B", "C")
"""
return tuple(nom.strip() for nom in re.split("""([ ]*[A-Za-z][_]?[0-9"']*[ ]*)""", name) if nom)
def convert_geoname(name, level = 0):
"""Convertit le nom entré par l'utilisateur en un nom réellement interprétable.
Une conversion de niveau 1 est appliquée dans les boîtes de dialogue.
Une conversion de niveau 0 est appliquée dans la console."""
if level > 0:
if level > 1:
if " " not in name:
name = " ".join(split_geoname(name))
name = name.replace('"', "''")
name = name.replace("'''", "_tierce")
name = name.replace("''", "_seconde")
name = name.replace("'", "_prime")
name = name.replace("```", "_tierce")
name = name.replace("``", "_seconde")
name = name.replace("`", "_prime")
return name
def split_around_parenthesis(main_string, position = 0, leftbracket = "("):
"""Coupe le premier groupe entre parentheses rencontré, en tenant compte des guillemets.
'leftbracket' peut prendre les valeurs "(", "[" ou "{"
La parenthese ouvrante du groupe sera la première trouvée à droite de 'position'
Exemple: '1+4*(5+3*(2+7)+2-")")*7+(2-3)+4' -> ['1+4*', '(5+3*(2+7)+2-")")', '*7+(2-3)+4']
"""
in_string = False # est-on dans une chaine ?
in_string_sep = "'" # caractere encadrant la chaine (" ou ')
position = main_string.find(leftbracket, position)
if position == -1:
return (main_string,)
parentheses = 1 # tient le compte des parentheses ouvertes non fermees
rightbracket = {"(": ")", "[": "]", "{": "}"}[leftbracket]
prefixe = main_string[:position]
chaine = main_string[position + 1:]
for i in range(len(chaine)):
a = chaine[i]
if a in ("'", '"'):
if in_string:
if in_string_sep == a: # attention, il y a 2 indicateurs de chaine (" et ')
in_string = False
else:
in_string = True
in_string_sep = a
elif a == leftbracket and not in_string:
parentheses += 1
elif a == rightbracket and not in_string:
parentheses -= 1
if parentheses == 0:
return (prefixe, leftbracket + chaine[:i + 1], chaine[i + 1:])
return (main_string,) # aucune parenthese fermante n'a été trouvée pour ce groupe.
def find_closing_bracket(expr, start = 0, brackets = '{}'):
expr_deb = expr[:min(len(expr), 30)]
# for debugging
index = 0
balance = 1
# None if we're not presently in a string
# Else, string_type may be ', ''', ", or """
string_type = None
reg = re.compile('["' + brackets + "']") # ', ", { and } matched
open_bracket = brackets[0]
close_bracket = brackets[1]
if start:
expr = expr[start:]
while balance:
m = re.search(reg, expr)
#~ print 'scan:', m
if m is None:
break
result = m.group()
i = m.start()
if result == open_bracket:
if string_type is None:
balance += 1
elif result == close_bracket:
if string_type is None:
balance -= 1
# Brackets in string should not be recorded...
# so, we have to detect if we're in a string at the present time.
elif result in ("'", '"'):
if string_type is None:
if expr[i:].startswith(3*result):
string_type = 3*result
i += 2
else:
string_type = result
elif string_type == result:
string_type = None
elif string_type == 3*result:
if expr[i:].startswith(3*result):
string_type = None
i += 2
i += 1 # counting the current caracter as already scanned text
index += i
expr = expr[i:]
else:
return start + index - 1 # last caracter is the searched bracket :-)
raise ValueError('unbalanced brackets (%s) while scanning %s...' %(balance, repr(expr_deb)))
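# Illustrative example (editorial addition):
# find_closing_bracket('{a{b}c}', 1) -> 6
# (starting just after the opening brace, the matching closing brace is at index 6;
# brackets appearing inside string literals are ignored).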
def warning(message, type_warning = Warning, level=0):
if param.warning:
warnings.warn(message, type_warning, stacklevel = (level + 3))
def deprecation(message, level=0):
warnings.warn(message, DeprecationWarning, stacklevel = (level + 3))
def path2(chemin):
"""Transforme le chemin en remplaçant les / et \\ selon le séparateur utilisé par le système.
% est remplacé par l'emplacement du programme (contenu dans param.EMPLACEMENT).
Exemple : path2("%/wxgeometrie/images/archives/old.png").
~ fait référence au répertoire personnel de l'utilisateur (ex: /home/SteveB/ sous Linux.
"""
return os.path.normpath(os.path.expanduser(chemin.replace("%", param.EMPLACEMENT)))
# L'idée de compiler en une fois pour toute les expressions regulières n'est pas avantageuse,
# car python le fait déjà automatiquement pour celles utilisées le plus souvent.
#~ def regsub(regular_exp, main_string, action = ""):
#~ u"""Transforme la chaine "main_string" :
#~ Il applique aux parties vérifiant "regular_exp" le traitement "action".
#~ >>> regsub("[a-z]", "salut les amis !", "?")
#~ '????? ??? ???? !'
#~ >>> regsub("[a-z]+", "hello world !", lambda s: s[1:])
#~ 'ello orld !'
#~ """
#~ if isinstance(regular_exp, basestring):
#~ if isinstance(action, basestring):
#~ return re.sub(regular_exp, action, main_string)
#~ else:
#~ return re.sub(regular_exp, lambda x: action(x.group(0)), main_string)
#~ else:
#~ if isinstance(action, basestring):
#~ return regular_exp.sub(action, main_string)
#~ else:
#~ return regular_exp.sub(lambda x: action(x.group(0)), main_string)
#~ class REStorageDict(dict):
#~ u"""Un dictionnaire qui stocke les RE sous forme compilée.
#~ """
#~ def __getitem__(self, name):
#~ try:
#~ return dict.__getitem__(self, name)
#~ except KeyError:
#~ value = re.compile(name)
#~ self.__setitem__(name, value)
#~ return value
#~ class WeakRef(weakref.ref):
#~ """WeakRef surclasse weakref.ref en modifiant sa méthode '__eq__'.
#~ a == b <=> type(a) == type(b) == WeakRef and a() is b().
#~ Le but est de ne pas appeler les méthodes __eq__ des objets référencés."""
#~ def __eq__(self, y):
#~ if not isinstance(y, WeakRef):
#~ return False
#~ if self() is None or y() is None:
#~ return self is y
#~ return self() is y()
#~ def __hash__(self):
#~ return id(self())
#~ class CustomWeakKeyDictionary(weakref.WeakKeyDictionary):
#~ """WeakKeyDictionary utilisant Weakref au lieu de weakref.ref.
#~ """
#~ def __delitem__(self, key):
#~ del self.data[WeakRef(key)]
#~ def __getitem__(self, key):
#~ return self.data[WeakRef(key)]
#~ def __repr__(self):
#~ return "<WeakKeyDictionary at %s>" % id(self)
#~ def __setitem__(self, key, value):
#~ self.data[WeakRef(key, self._remove)] = value
#~ def copy(self):
#~ new = CustomWeakKeyDictionary()
#~ for key, value in self.data.items():
#~ o = key()
#~ if o is not None:
#~ new[o] = value
#~ return new
#~ def get(self, key, default=None):
#~ return self.data.get(WeakRef(key),default)
#~ def has_key(self, key):
#~ try:
#~ wr = WeakRef(key)
#~ except TypeError:
#~ return 0
#~ return wr in self.data
#~ def __contains__(self, key):
#~ try:
#~ wr = WeakRef(key)
#~ except TypeError:
#~ return 0
#~ return wr in self.data
#~ def pop(self, key, *args):
#~ return self.data.pop(WeakRef(key), *args)
#~ def setdefault(self, key, default=None):
#~ return self.data.setdefault(WeakRef(key, self._remove),default)
#~ def update(self, dict=None, **kwargs):
#~ d = self.data
#~ if dict is not None:
#~ if not hasattr(dict, "items"):
#~ dict = type({})(dict)
#~ for key, value in dict.items():
#~ d[WeakRef(key, self._remove)] = value
#~ if len(kwargs):
#~ self.update(kwargs)
def debug(*messages):
"""Affiche un (ou plusieurs) message(s) si le déboguage est actif."""
if param.debug:
for message in messages:
print(message)
@decorator
def trace(f, *args, **kw):
if param.debug:
print("Calling %s with args %s, %s" % (f.__name__, args, kw))
return f(*args, **kw)
@decorator
def full_trace(f, *args, **kw):
if param.debug:
print('** Debugging info **')
traceback.print_stack()
print("Calling %s with args %s, %s" % (f.__name__, args, kw))
print('-------------------\n')
return f(*args, **kw)
def deprecated(message = ''):
"A decorator for deprecated functions"
@decorator
def _deprecated(func, *args, **kw):
"A decorator for deprecated functions"
warnings.warn('\n'.join(('La fonction %r est desuette.' %func.__name__, message)),
DeprecationWarning, stacklevel = 3)
return func(*args, **kw)
return _deprecated
##@decorator
##def deprecated(func, *args, **kw):
## "A decorator for deprecated functions"
## warnings.warn(
## ('Calling the deprecated function %r\n'
## 'Downgrade to decorator 2.3 if you want to use this functionality')
## % func.__name__, DeprecationWarning, stacklevel=3)
## return func(*args, **kw)
def traceit(frame, event, arg):
"""'Trace' (suit) une fonction python.
Usage:
import sys
sys.settrace(traceit)"""
if event == "line":
lineno = frame.f_lineno
filename = frame.f_globals["__file__"]
if (filename.endswith(".pyc") or
filename.endswith(".pyo")):
filename = filename[:-1]
name = frame.f_globals["__name__"]
line = linecache.getline(filename, lineno)
print("%s:%s: %s" % (name, lineno, line.rstrip()))
return traceit
def tracer_(booleen = True):
if booleen:
sys.settrace(traceit)
else:
sys.settrace(None)
def property2(fonction):
return property(fonction, fonction)
def _archive(string):
return zlib.compress(string.encode('utf8'))
def _extract(data):
return zlib.decompress(data).decode('utf8')
class CompressedList(list):
def append(self, s):
list.append(self, _archive(s))
def __getitem__(self, i):
return _extract(list.__getitem__(self, i))
def __setitem__(self, i, s):
list.__setitem__(self, i, _archive(s))
def remove(self, s):
list.remove(self, _archive(s))
def count(self, s):
return list.count(self, _archive(s))
def extend(self, iterable):
list.extend(self, (_archive(s) for s in iterable))
def index(self, s):
list.index(self, _archive(s))
def insert(self, i, s):
list.insert(self, i, _archive(s))
def pop(self, i = -1):
return _extract(list.pop(self, i))
def pstfunc(chaine):
args = []
dict_op = {'mul':'*','add':'+','exp':'**','div':'/','sub':'-'}
dict_fn = {'ln':'ln'}
def code_arg(s):
return '(' + str(sympify(s)) + ')'
for s in chaine.split(' '):
if s in dict_op:
args = [code_arg(dict_op[s].join(args))]
elif s in dict_fn:
assert len(args) == 1
args = [code_arg(dict_fn[s] + '(' + args[0] + ')')]
elif s:
args.append(code_arg(s))
assert len(args) == 1
return args[0]
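# Rough example of the intended behaviour (editorial addition, assuming the dict_fn[s]
# fix above); the exact output depends on sympy's printer:
# pstfunc('x 3 mul') -> '(3*x)'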
class NoArgument(object):
'''Utilisé comme valeur par défaut, pour savoir si un argument optionnel
a été passé. Une seule instance peut-être crée.'''
__instance = None
def __new__(cls):
if cls.__instance is None:
cls.__instance = object.__new__(cls)
return cls.__instance
no_argument = NoArgument()
class OrderedDict(dict):
def __init__(self, seq = ()):
self.__keys = []
dict.__init__(self)
for key, val in seq:
self[key] = val
def __setitem__(self, key, value):
if key not in self:
self.__keys.append(key)
dict.__setitem__(self, key, value)
def __delitem__(self, key):
dict.__delitem__(self, key)
self.__keys.remove(key)
def __iter__(self):
return iter(self.__keys)
def __repr__(self):
return "MyOrderedDict(%s)"%repr(list(self.items()))
def keys(self):
return self.__keys[:]
def values(self):
return [self[key] for key in self.__keys]
def items(self):
return [(key, self[key]) for key in self.__keys]
def copy(self):
return self.__class__(iter(self.items()))
def iterkeys(self):
return iter(self)
def iteritems(self):
return ((key, self[key]) for key in self.__keys)
def itervalues(self):
return (self[key] for key in self.__keys)
def update(self, E, **F):
if hasattr(E, 'keys'):
for k in E:
self[k] = E[k]
else:
for (k, v) in E:
self[k] = v
for k in F:
self[k] = F[k]
def setdefaut(self, k, d = None):
if k not in self:
self[k] = d
return self[k]
def clear(self):
del self.__keys[:]
dict.clear(self)
def pop(self, k, d=no_argument):
try:
v = dict.pop(self, k)
self.__keys.remove(k)
return v
except KeyError:
if d is no_argument:
raise
return d
def __reversed__(self):
return reversed(self.__keys)
def popitem(self):
if not self:
raise KeyError('dictionary is empty')
key = self.__keys.pop()
value = dict.pop(self, key)
return key, value
|
gpl-2.0
|
dlazz/ansible
|
test/units/modules/network/edgeswitch/test_edgeswitch_vlan.py
|
29
|
5640
|
# (c) 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.edgeswitch import edgeswitch_vlan
from ansible.modules.network.edgeswitch.edgeswitch_vlan import parse_vlan_brief, parse_interfaces_switchport
from units.modules.utils import set_module_args
from .edgeswitch_module import TestEdgeswitchModule, load_fixture
class TestEdgeswitchVlanModule(TestEdgeswitchModule):
module = edgeswitch_vlan
def setUp(self):
super(TestEdgeswitchVlanModule, self).setUp()
self.mock_run_commands = patch('ansible.modules.network.edgeswitch.edgeswitch_vlan.run_commands')
self.run_commands = self.mock_run_commands.start()
self.mock_load_config = patch('ansible.modules.network.edgeswitch.edgeswitch_vlan.load_config')
self.load_config = self.mock_load_config.start()
def tearDown(self):
super(TestEdgeswitchVlanModule, self).tearDown()
self.mock_run_commands.stop()
self.mock_load_config.stop()
def load_fixtures(self, commands=None):
def load_from_file(*args, **kwargs):
module, commands = args
output = list()
for command in commands:
if command.startswith('vlan ') or command == 'exit':
output.append('')
else:
filename = str(command).split(' | ')[0].replace(' ', '_')
output.append(load_fixture('edgeswitch_vlan_%s' % filename))
return output
self.run_commands.side_effect = load_from_file
self.load_config.return_value = {}
def test_edgeswitch_vlan_create(self):
set_module_args({'vlan_id': '200', 'name': 'video', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan 200',
'vlan name 200 \"video\"',
'exit'
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_id_startwith_100(self):
set_module_args({'vlan_id': '100', 'name': 'voice', 'state': 'present'})
result = self.execute_module(changed=False)
expected_commands = []
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_rename(self):
set_module_args({'vlan_id': '100', 'name': 'video', 'state': 'present'})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan name 100 \"video\"',
'exit'
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_with_interfaces_range(self):
set_module_args({'vlan_id': '100', 'name': 'voice', 'state': 'present', 'tagged_interfaces': ['0/6-0/8']})
result = self.execute_module(changed=True)
expected_commands = [
'interface 0/6-0/8',
'vlan participation include 100',
'vlan tagging 100',
]
self.assertEqual(result['commands'], expected_commands)
def test_edgeswitch_vlan_with_interfaces_and_newvlan(self):
set_module_args({'vlan_id': '3', 'name': 'vlan3', 'state': 'present', 'untagged_interfaces': ['0/8', '0/7']})
result = self.execute_module(changed=True)
expected_commands = [
'vlan database',
'vlan 3',
'vlan name 3 \"vlan3\"',
'exit',
'interface 0/7-0/8',
'vlan participation include 3',
'vlan pvid 3',
]
self.assertEqual(result['commands'], expected_commands)
def test_parse_interfaces_switchport(self):
result = parse_interfaces_switchport(load_fixture('edgeswitch_vlan_show_interfaces_switchport'))
i1 = {
'interface': '0/1',
'pvid_mode': '1',
'untagged_vlans': ['1'],
'tagged_vlans': ['100'],
'forbidden_vlans': [''],
}
i3 = {
'interface': '0/3',
'pvid_mode': '1',
'untagged_vlans': [''],
'tagged_vlans': ['100'],
'forbidden_vlans': ['1'],
}
i5 = {
'interface': '0/5',
'pvid_mode': '100',
'untagged_vlans': ['100'],
'tagged_vlans': [''],
'forbidden_vlans': [''],
}
self.assertEqual(result['0/1'], i1)
self.assertEqual(result['0/3'], i3)
self.assertEqual(result['0/5'], i5)
def test_parse_vlan_brief(self):
result = parse_vlan_brief(load_fixture('edgeswitch_vlan_show_vlan_brief'))
obj = [
{
'vlan_id': '1',
'name': 'default'
},
{
'vlan_id': '100',
'name': 'voice'
}
]
self.assertEqual(result, obj)
|
gpl-3.0
|
surdy/dcos
|
pkgpanda/test_util.py
|
2
|
14309
|
import os
import tempfile
from subprocess import CalledProcessError
import pytest
import pkgpanda.util
from pkgpanda import UserManagement
from pkgpanda.exceptions import ValidationError
PathSeparator = '/' # Currently the same on Windows and Linux; this constant may become platform-specific in the future
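# A portable alternative would be os.path.join (sketch only; the tests below
# keep the explicit separator as written):
#   test_dir = os.path.join(tempfile.gettempdir(), 'test')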
def test_remove_file_pass():
"""
Remove a file from a known directory. Should succeed silently.
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'test'
# Here we really don't care if there is a left over dir since we will be removing it
# but we need to make sure there is one
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Build the temporary test file with a random name
fno, test_path = tempfile.mkstemp(dir=test_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
test_data = "Test Data\n"
with open(test_path, "w") as f:
f.write(test_data)
pkgpanda.util.remove_file(test_path)
assert not os.path.exists(test_path), 'Directory item not removed'
pkgpanda.util.remove_directory(test_dir)
assert not os.path.exists(test_dir)
def test_remove_file_fail():
"""
Remove a non-existent directory item. Should fail silently without exceptions.
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'remove_directory_fail'
test_path = test_dir + PathSeparator + "A"
# Make sure there is no left over directory
pkgpanda.util.remove_directory(test_dir)
assert not os.path.isdir(test_dir)
# We will try to remove a non-existent file
try:
pkgpanda.util.remove_file(test_path)
except Exception:
assert False, "Unexpected exception when trying to delete non existant directory item. Should fail silently"
assert not os.path.exists(test_path)
def test_make_directory_pass():
"""
Create a known directory and verify. Postcondition: the directory should exist
"""
test_dir = tempfile.gettempdir() + PathSeparator + 'make_directory_pass'
# Make sure there is no left over directory
pkgpanda.util.remove_directory(test_dir)
assert not os.path.isdir(test_dir)
# Make the directory and check for its existence as a dir
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Cleanup
pkgpanda.util.remove_directory(test_dir)
def test_make_directory_fail():
"""
Attempt to create a directory with an empty name. Postcondition: Should throw an OSError
"""
test_dir = "" # Lets make nothing...
# Try to make the directory and check for its error
try:
pkgpanda.util.make_directory(test_dir)
except OSError as e:
assert e.errno == 2 # ENOENT: no such file or directory
return
assert False, 'did not see expected OSError when trying to build unnamed directory'
def test_copy_file_pass():
"""
Copy a file from a known directory to another known file path.
Postcondition: The file should have been copied.
The copy should contain the same contents as the original.
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
pkgpanda.util.make_directory(test_dst_dir)
# Build the source file
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno)
# Build the temporary dest file with a random name
fno, dst_path = tempfile.mkstemp(dir=test_dst_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
pkgpanda.util.copy_file(src_path, dst_path)
lines = []
with open(dst_path, "r") as f:
lines = f.readlines()
assert lines[0] == test_data
def test_copy_file_fail():
"""
Copy a file from a known directory to another known file path whose directory does not exist.
Postcondition: Should throw a CalledProcessError or an OSError
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
# Build the source file
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno)
dst_path = test_dst_dir + PathSeparator + os.path.basename(src_path)
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
try:
pkgpanda.util.copy_file(src_path, dst_path)
except CalledProcessError:
return
except OSError:
return
assert False, 'did not see expected CalledProcessError or OSError when trying to copy to a non-existent directory item'
def test_copy_directory_pass():
"""
Copy a directory of files from a known directory to another known file path whose directory does not exist.
Postcondition: Should have recursively created the directories and files for the entire tree
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# Build the dirs for copying to/from
pkgpanda.util.make_directory(test_src_dir)
# Build the temporary source file with a random name
fno, src_path = tempfile.mkstemp(dir=test_src_dir)
os.close(fno) # Close the reference so we don't have dangling file handles
dst_path = test_dst_dir + PathSeparator + os.path.basename(src_path)
test_data = "Test Data\n"
with open(src_path, "w") as f:
f.write(test_data)
# copy the source file to the destination directory
pkgpanda.util.copy_directory(test_src_dir, test_dst_dir)
with open(dst_path, "r") as f:
lines = f.readlines()
assert lines[0] == test_data
def test_copy_directory_fail():
"""
Attempt to copy a directory of files from a non-existent directory to another
known file path whose directory does not exist.
Postcondition: Should throw a CalledProcessError or an OSError
"""
# Make sure we don't have the temp dirs and files left over
test_src_dir = tempfile.gettempdir() + PathSeparator + 'test_src'
test_dst_dir = tempfile.gettempdir() + PathSeparator + 'test_dst'
pkgpanda.util.remove_directory(test_src_dir)
pkgpanda.util.remove_directory(test_dst_dir)
assert not os.path.isdir(test_src_dir)
assert not os.path.isdir(test_dst_dir)
# try to copy the source file to the destination directory
try:
pkgpanda.util.copy_directory(test_src_dir, test_dst_dir)
except CalledProcessError:
return
except OSError:
return
assert False, 'did not see expected CalledProcessError or OSError when trying to copy to a non-existent directory tree'
def test_remove_directory():
test_dir = tempfile.gettempdir() + PathSeparator + 'test'
# Here we really don't care if there is a left over dir since we will be removing it
# but we need to make sure there is one
pkgpanda.util.make_directory(test_dir)
assert os.path.isdir(test_dir)
# Add some subdirectories and files
pkgpanda.util.make_directory(test_dir + PathSeparator + 'A')
# Build a file
fno, file_path = tempfile.mkstemp(dir=test_dir)
os.close(fno)
test_data = "Test Data\n"
with open(file_path, "r+") as f:
f.write(test_data)
# Build a file
fno, file_path = tempfile.mkstemp(dir=test_dir + PathSeparator + 'A')
os.close(fno)
test_data = "Test Data 2\n"
with open(file_path, "r+") as f:
f.write(test_data)
pkgpanda.util.remove_directory(test_dir)
assert not os.path.exists(file_path)
assert not os.path.isdir(test_dir + PathSeparator + 'A')
assert not os.path.isdir(test_dir)
def test_variant_variations():
assert pkgpanda.util.variant_str(None) == ''
assert pkgpanda.util.variant_str('test') == 'test'
assert pkgpanda.util.variant_object('') is None
assert pkgpanda.util.variant_object('test') == 'test'
assert pkgpanda.util.variant_name(None) == '<default>'
assert pkgpanda.util.variant_name('test') == 'test'
assert pkgpanda.util.variant_prefix(None) == ''
assert pkgpanda.util.variant_prefix('test') == 'test.'
def test_validate_username():
def good(name):
UserManagement.validate_username(name)
def bad(name):
with pytest.raises(ValidationError):
UserManagement.validate_username(name)
good('dcos_mesos')
good('dcos_a')
good('dcos__')
good('dcos_a_b_c')
good('dcos_diagnostics')
good('dcos_a1')
good('dcos_1')
bad('dcos')
bad('d')
bad('d_a')
bad('foobar_asdf')
bad('dcos_***')
bad('dc/os_foobar')
bad('dcos_foo:bar')
bad('3dcos_foobar')
bad('dcos3_foobar')
@pytest.mark.skipif(pkgpanda.util.is_windows, reason="Windows does not have a root group")
def test_validate_group():
# assuming linux distributions have `root` group.
UserManagement.validate_group('root')
with pytest.raises(ValidationError):
UserManagement.validate_group('group-should-not-exist')
def test_split_by_token():
split_by_token = pkgpanda.util.split_by_token
# Token prefix and suffix must not be empty.
with pytest.raises(ValueError):
list(split_by_token('', ')', 'foo'))
with pytest.raises(ValueError):
list(split_by_token('(', '', 'foo'))
with pytest.raises(ValueError):
list(split_by_token('', '', 'foo'))
# Empty string.
assert list(split_by_token('{{ ', ' }}', '')) == [('', False)]
# String with no tokens.
assert list(split_by_token('{{ ', ' }}', 'no tokens')) == [('no tokens', False)]
# String with one token.
assert list(split_by_token('{{ ', ' }}', '{{ token_name }}')) == [('{{ token_name }}', True)]
assert list(split_by_token('{{ ', ' }}', 'foo {{ token_name }}')) == [('foo ', False), ('{{ token_name }}', True)]
assert list(split_by_token('{{ ', ' }}', '{{ token_name }} foo')) == [('{{ token_name }}', True), (' foo', False)]
# String with multiple tokens.
assert list(split_by_token('{{ ', ' }}', 'foo {{ token_a }} bar {{ token_b }} \n')) == [
('foo ', False), ('{{ token_a }}', True), (' bar ', False), ('{{ token_b }}', True), (' \n', False)
]
# Token decoration is stripped when requested.
assert list(split_by_token('[[', ']]', 'foo [[token_a]] bar[[token_b ]]', strip_token_decoration=True)) == [
('foo ', False), ('token_a', True), (' bar', False), ('token_b ', True)
]
# Token prefix and suffix can be the same.
assert list(split_by_token('||', '||', 'foo ||token_a|| bar ||token_b|| \n')) == [
('foo ', False), ('||token_a||', True), (' bar ', False), ('||token_b||', True), (' \n', False)
]
assert list(split_by_token('||', '||', 'foo ||token_a|| bar ||token_b|| \n', strip_token_decoration=True)) == [
('foo ', False), ('token_a', True), (' bar ', False), ('token_b', True), (' \n', False)
]
# Missing token suffix.
with pytest.raises(Exception):
list(split_by_token('(', ')', '(foo) (bar('))
# Missing suffix for middle token.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', '[[foo]] [[bar [[baz]]'))
# Missing token prefix.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', 'foo]] [[bar]]'))
# Nested tokens.
with pytest.raises(Exception):
list(split_by_token('[[', ']]', '[[foo]] [[bar [[baz]] ]]'))
# Docstring examples.
assert list(split_by_token('{', '}', 'some text {token} some more text')) == [
('some text ', False), ('{token}', True), (' some more text', False)
]
assert list(split_by_token('{', '}', 'some text {token} some more text', strip_token_decoration=True)) == [
('some text ', False), ('token', True), (' some more text', False)
]
# TODO: DCOS_OSS-3508 - muted Windows tests requiring investigation
@pytest.mark.skipif(pkgpanda.util.is_windows, reason="Windows and Linux permissions parsed differently")
def test_write_string(tmpdir):
"""
`pkgpanda.util.write_string` writes or overwrites a file with permissions
for User to read and write, Group to read and Other to read.
Permissions of the given filename are preserved, or a new file is created
with 0o644 permissions.
This test was written to make current functionality regression-safe which
is why no explanation is given for these particular permission
requirements.
"""
filename = os.path.join(str(tmpdir), 'foo_filename')
pkgpanda.util.write_string(filename=filename, data='foo_contents')
with open(filename) as f:
assert f.read() == 'foo_contents'
pkgpanda.util.write_string(filename=filename, data='foo_contents_2')
with open(filename) as f:
assert f.read() == 'foo_contents_2'
st_mode = os.stat(filename).st_mode
expected_permission = 0o644
assert (st_mode & 0o777) == expected_permission
os.chmod(filename, 0o777)
pkgpanda.util.write_string(filename=filename, data='foo_contents_3')
with open(filename) as f:
assert f.read() == 'foo_contents_3'
st_mode = os.stat(filename).st_mode
expected_permission = 0o777
assert (st_mode & 0o777) == expected_permission
|
apache-2.0
|
wsmith323/django
|
django/db/backends/base/schema.py
|
339
|
43421
|
import hashlib
import logging
from django.db.backends.utils import truncate_name
from django.db.transaction import atomic
from django.utils import six
from django.utils.encoding import force_bytes
logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
)
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a migration file), and by first calling start(),
then the relevant actions, and then commit(). This is necessary to allow
things like circular foreign key references - FKs will only be created once
commit() is called.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_update_with_default = "UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = (
"ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) "
"REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
)
sql_create_inline_fk = None
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection, collect_sql=False):
self.connection = connection
self.collect_sql = collect_sql
if self.collect_sql:
self.collected_sql = []
# State-managing methods
def __enter__(self):
self.deferred_sql = []
if self.connection.features.can_rollback_ddl:
self.atomic = atomic(self.connection.alias)
self.atomic.__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
if self.connection.features.can_rollback_ddl:
self.atomic.__exit__(exc_type, exc_value, traceback)
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
if self.collect_sql:
ending = "" if sql.endswith(";") else ";"
if params is not None:
self.collected_sql.append((sql % tuple(map(self.quote_value, params))) + ending)
else:
self.collected_sql.append(sql + ending)
else:
with self.connection.cursor() as cursor:
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
for arg in args:
h.update(force_bytes(arg))
return h.hexdigest()[:8]
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None, None
# Work out nullability
null = field.null
# If we were told to include a default value, do so
include_default = include_default and not self.skip_default(field)
if include_default:
default_value = self.effective_default(field)
if default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null and not self.connection.features.implied_column_null:
sql += " NULL"
elif not null:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Return the sql
return sql, params
def skip_default(self, field):
"""
Some backends don't accept default values for certain columns types
(i.e. MySQL longtext and longblob).
"""
return False
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError(
'subclasses of BaseDatabaseSchemaEditor for backends which have '
'requires_literal_defaults must provide a prepare_default() method'
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
if field.get_internal_type() == "BinaryField":
default = six.binary_type()
else:
default = six.text_type()
else:
default = None
# If it's a callable, call it
if six.callable(default):
default = default()
# Run it through the field's get_db_prep_save method so we can send it
# to the database.
default = field.get_db_prep_save(default, self.connection)
return default
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
so this is safe).
"""
raise NotImplementedError()
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Autoincrement SQL (for backends with inline variant)
col_type_suffix = field.db_type_suffix(connection=self.connection)
if col_type_suffix:
definition += " %s" % col_type_suffix
params.extend(extra_params)
# FK
if field.remote_field and field.db_constraint:
to_table = field.remote_field.model._meta.db_table
to_column = field.remote_field.model._meta.get_field(field.remote_field.field_name).column
if self.connection.features.supports_foreign_keys:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
elif self.sql_create_inline_fk:
definition += " " + self.sql_create_inline_fk % {
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
# Autoincrement SQL (for backends with post table definition variant)
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers (always deferred, as some fields might be
# created afterwards, like geometry fields with some backends)
for fields in model._meta.unique_together:
columns = [model._meta.get_field(field).column for field in fields]
self.deferred_sql.append(self._create_unique_sql(model, columns))
# Make the table
sql = self.sql_create_table % {
"table": self.quote_name(model._meta.db_table),
"definition": ", ".join(column_sqls)
}
if model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
if tablespace_sql:
sql += ' ' + tablespace_sql
# Prevent using [] as params, in the case a literal '%' is used in the definition
self.execute(sql, params or None)
# Add any field index and index_together's (deferred as SQLite3 _remake_table needs it)
self.deferred_sql.extend(self._model_indexes_sql(model))
# Make M2M tables
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
self.delete_model(field.remote_field.through)
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'unique': True}, self.sql_delete_unique)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field(field).column for field in fields]
self.execute(self._create_unique_sql(model, columns))
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
self._delete_composed_index(model, fields, {'index': True}, self.sql_delete_index)
# Created indexes
for field_names in news.difference(olds):
fields = [model._meta.get_field(field) for field in field_names]
self.execute(self._create_index_sql(model, fields, suffix="_idx"))
def _delete_composed_index(self, model, fields, constraint_kwargs, sql):
columns = [model._meta.get_field(field).column for field in fields]
constraint_names = self._constraint_names(model, columns, **constraint_kwargs)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
if old_db_table == new_db_table:
return
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.create_model(field.remote_field.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if not self.skip_default(field) and field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(self._create_index_sql(model, [field]))
# Add any FK constraints later
if field.remote_field and self.connection.features.supports_foreign_keys and field.db_constraint:
self.deferred_sql.append(self._create_fk_sql(model, field, "_fk_%(to_table)s_%(to_column)s"))
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
return self.delete_model(field.remote_field.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Drop any FK constraints, MySQL requires explicit deletion
if field.remote_field:
fk_names = self._constraint_names(model, [field.column], foreign_key=True)
for fk_name in fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if ((old_type is None and old_field.remote_field is None) or
(new_type is None and new_field.remote_field is None)):
raise ValueError(
"Cannot alter field %s into %s - they do not properly define "
"db_type (are you using a badly-written custom field?)" %
(old_field, new_field),
)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
old_field.remote_field.through._meta.auto_created and
new_field.remote_field.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None and new_type is None and (
old_field.remote_field.through and new_field.remote_field.through and
not old_field.remote_field.through._meta.auto_created and
not new_field.remote_field.through._meta.auto_created):
# Both sides have through models; this is a no-op.
return
elif old_type is None or new_type is None:
raise ValueError(
"Cannot alter field %s into %s - they are not compatible types "
"(you cannot alter to or from M2M fields, or add or remove "
"through= on M2M fields)" % (old_field, new_field)
)
self._alter_field(model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict)
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
fks_dropped.add((old_field.column,))
self.execute(self._delete_constraint_sql(self.sql_delete_fk, model, fk_name))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_unique, model, constraint_name))
# Drop incoming FK constraints if we're a primary key and things are going
# to change.
if old_field.primary_key and new_field.primary_key and old_type != new_type:
# '_meta.related_field' also contains M2M reverse fields, these
# will be filtered out
for _old_rel, new_rel in _related_non_m2m_objects(old_field, new_field):
rel_fk_names = self._constraint_names(
new_rel.related_model, [new_rel.field.column], foreign_key=True
)
for fk_name in rel_fk_names:
self.execute(self._delete_constraint_sql(self.sql_delete_fk, new_rel.related_model, fk_name))
# Removed an index? (no strict check, as multiple indexes are possible)
if (old_field.db_index and not new_field.db_index and
not old_field.unique and not
(not new_field.unique and old_field.unique)):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
for index_name in index_names:
self.execute(self._delete_constraint_sql(self.sql_delete_index, model, index_name))
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_check, model, constraint_name))
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self._rename_field_sql(model._meta.db_table, old_field, new_field, new_type))
# Next, start accumulating actions to do
actions = []
null_actions = []
post_actions = []
# Type change?
if old_type != new_type:
fragment, other_actions = self._alter_column_type_sql(
model._meta.db_table, old_field, new_field, new_type
)
actions.append(fragment)
post_actions.extend(other_actions)
# When changing a column NULL constraint to NOT NULL with a given
# default value, we need to perform 4 steps:
# 1. Add a default for new incoming writes
# 2. Update existing NULL rows with new default
# 3. Replace NULL constraint with NOT NULL
# 4. Drop the default again.
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
needs_database_default = (
old_default != new_default and
new_default is not None and
not self.skip_default(new_field)
)
if needs_database_default:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if (self.connection.features.interprets_empty_strings_as_nulls and
new_field.get_internal_type() in ("CharField", "TextField")):
# The field is nullable in the database anyway, leave it alone
pass
elif new_field.null:
null_actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
null_actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Only if we have a default and there is a change from NULL to NOT NULL
four_way_default_alteration = (
new_field.has_default() and
(old_field.null and not new_field.null)
)
if actions or null_actions:
if not four_way_default_alteration:
# If we don't have to do a 4-way default alteration we can
# directly run a (NOT) NULL alteration
actions = actions + null_actions
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters and actions:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), sum(params, []))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if four_way_default_alteration:
# Update existing rows with default value
self.execute(
self.sql_update_with_default % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
)
# Since we didn't run a NOT NULL change before we need to do it
# now
for sql, params in null_actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
if post_actions:
for sql, params in post_actions:
self.execute(sql, params)
# Added a unique?
if (not old_field.unique and new_field.unique) or (
old_field.primary_key and not new_field.primary_key and new_field.unique
):
self.execute(self._create_unique_sql(model, [new_field.column]))
# Added an index?
if (not old_field.db_index and new_field.db_index and
not new_field.unique and not
(not old_field.unique and new_field.unique)):
self.execute(self._create_index_sql(model, [new_field], suffix="_uniq"))
# Type alteration on primary key? Then we need to alter the column
# referring to us.
rels_to_update = []
if old_field.primary_key and new_field.primary_key and old_type != new_type:
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(self._delete_constraint_sql(self.sql_delete_pk, model, constraint_name))
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_pk")),
"columns": self.quote_name(new_field.column),
}
)
# Update all referencing columns
rels_to_update.extend(_related_non_m2m_objects(old_field, new_field))
# Handle our type alters on the other end of rels from the PK stuff above
for old_rel, new_rel in rels_to_update:
rel_db_params = new_rel.field.db_parameters(connection=self.connection)
rel_type = rel_db_params['type']
fragment, other_actions = self._alter_column_type_sql(
new_rel.related_model._meta.db_table, old_rel.field, new_rel.field, rel_type
)
self.execute(
self.sql_alter_column % {
"table": self.quote_name(new_rel.related_model._meta.db_table),
"changes": fragment[0],
},
fragment[1],
)
for sql, params in other_actions:
self.execute(sql, params)
# Does it have a foreign key?
if (new_field.remote_field and
(fks_dropped or not old_field.remote_field or not old_field.db_constraint) and
new_field.db_constraint):
self.execute(self._create_fk_sql(model, new_field, "_fk_%(to_table)s_%(to_column)s"))
# Rebuild FKs that pointed to us if we previously had to drop them
if old_field.primary_key and new_field.primary_key and old_type != new_type:
for rel in new_field.model._meta.related_objects:
if not rel.many_to_many:
self.execute(self._create_fk_sql(rel.related_model, rel.field, "_fk"))
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, [new_field.column], suffix="_check")),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if needs_database_default:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
}
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Hook to specialize column type alteration for different backends,
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things: an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
"""
return (
(
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
),
[],
)
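# Return-shape illustration for _alter_column_type_sql (placeholder SQL, not
# any real backend's output):
#   (
#       ('ALTER COLUMN "col" TYPE <new type>', []),   # fragment spliced into ALTER TABLE
#       [('<follow-up statement>', [])],              # executed after the ALTER completes
#   )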
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
new_field.remote_field.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.remote_field.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.remote_field.through._meta.get_field(old_field.m2m_reverse_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_reverse_field_name()),
)
self.alter_field(
new_field.remote_field.through,
# for self-referential models we need to alter field from the other end too
old_field.remote_field.through._meta.get_field(old_field.m2m_field_name()),
new_field.remote_field.through._meta.get_field(new_field.m2m_field_name()),
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, self._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%s' % self._digest(table_name, *column_names)
max_length = self.connection.ops.max_name_length() or 200
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (
table_name, column_names[0], index_unique_name, suffix,
)).replace('"', '').replace('.', '_')
if len(index_name) > max_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(max_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > max_length:
index_name = hashlib.md5(force_bytes(index_name)).hexdigest()[:max_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
def _create_index_sql(self, model, fields, suffix="", sql=None):
"""
Return the SQL statement to create the index for one or several fields.
`sql` can be specified if the syntax differs from the standard (GIS
indexes, ...).
"""
if len(fields) == 1 and fields[0].db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(fields[0].db_tablespace)
elif model._meta.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(model._meta.db_tablespace)
else:
tablespace_sql = ""
if tablespace_sql:
tablespace_sql = " " + tablespace_sql
columns = [field.column for field in fields]
sql_create_index = sql or self.sql_create_index
return sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix=suffix)),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": tablespace_sql,
}
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together) for the
specified model, as a list.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
output = []
for field in model._meta.local_fields:
if field.db_index and not field.unique:
output.append(self._create_index_sql(model, [field], suffix=""))
for field_names in model._meta.index_together:
fields = [model._meta.get_field(field) for field in field_names]
output.append(self._create_index_sql(model, fields, suffix="_idx"))
return output
def _rename_field_sql(self, table, old_field, new_field, new_type):
return self.sql_rename_column % {
"table": self.quote_name(table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
}
def _create_fk_sql(self, model, field, suffix):
from_table = model._meta.db_table
from_column = field.column
to_table = field.target_field.model._meta.db_table
to_column = field.target_field.column
suffix = suffix % {
"to_table": to_table,
"to_column": to_column,
}
return self.sql_create_fk % {
"table": self.quote_name(from_table),
"name": self.quote_name(self._create_index_name(model, [from_column], suffix=suffix)),
"column": self.quote_name(from_column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
def _create_unique_sql(self, model, columns):
return self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(self._create_index_name(model, columns, suffix="_uniq")),
"columns": ", ".join(self.quote_name(column) for column in columns),
}
def _delete_constraint_sql(self, template, model, name):
return template % {
"table": self.quote_name(model._meta.db_table),
"name": self.quote_name(name),
}
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
with self.connection.cursor() as cursor:
constraints = self.connection.introspection.get_constraints(cursor, model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result
|
bsd-3-clause
|
CottageLabs/OpenArticleGauge
|
openarticlegauge/tests/test_provider_skeleton.py
|
1
|
11803
|
from unittest import TestCase
import requests, os
from openarticlegauge import config, models
######################################################################################
# Set these variables/imports and the test case will use them to perform some general
# tests on your provider code
# import your plugin as "MyPlugin" (here, replace "plos" and "PLOSPlugin" with your plugin module's name and class)
from openarticlegauge.plugins.plos import PLOSPlugin as MyPlugin
# a list of urls which your plugin should be able to support
# these example values are from the PLOS plugin, they can be replaced with your own urls
SUPPORTED_URLS = ["http://www.plosone.org/1234", "www.plosbiology.org/fakjsskjdaf"]
# a list of urls which your plugin SHOULD NOT be able to support
# these example values are ones which the PLOS plugin does not support (and one of them isn't even a url!)
UNSUPPORTED_URLS = ["http://www.biomedcentral.com/", "askjdfsakjdhfsa"]
# a list of file paths and the expected licence object from parsing that file path
#
# in the examples here we construct file paths that are relative to this test class
# in the "resources" sub-directory. If you put your test documents in there, then
# all you need to change is the filename, which is the final argument passed os.path.join
#
# the example file used resources/pbio1001406.html is a plos web page
#
# The licence object is as defined in the OAG API documentation. Any fields omitted will
# not be checked. Any fields included will be checked for an exact match against the actual
# record received when the plugin is run. See below for the full spec of the licence
# object.
#
# The rules for the comparison licence object are:
# - if a key has a value, the resulting object's value must match exactly
# - if a key has been omitted, it will not be tested
# - if a key's value is the empty string, the resulting object's key's value must be the empty string
# - if a key's value is None, the resulting object MUST NOT have the key or MUST be the empty string
# - if a key's value is -1, the resulting object MUST have the key
#
RESOURCE_AND_RESULT = {
os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources", "pbio.1001406.html") :
{
"id" : None, # there should be no id field
"version": "", # version should be the empty string
"type": "cc-by",
"jurisdiction": "", # jurisdiction should be the empty string
"open_access": True,
"BY": True,
"NC": False,
"ND": False,
"SA": False,
"provenance": {
"handler": MyPlugin._short_name, # name of plugin which processed this record
"handler_version": MyPlugin.__version__, # version of plugin which processed this record
"category": "page_scrape", # category is page_scrape
"description": 'License decided by scraping the resource at http://www.plosbiology.org/article/info%3Adoi%2F10.1371%2Fjournal.pbio.1001406 and looking for the following license statement: "This is an open-access article distributed under the terms of the Creative Commons Attribution License, which permits unrestricted use, distribution, and reproduction in any medium, provided the original author and source are credited.".', # description is a long string
"agent": config.agent, # agent is from configuration
"source": "http://www.plosbiology.org/article/info%3Adoi%2F10.1371%2Fjournal.pbio.1001406", # source is the url where we look this record up
"date": -1 # date is not null (but we don't know the exact value)
}
}, ## Just add a comma and copy the whole record to add a test for a
## second license statement
## ~~ TUTORIAL: YOU NEED TO MODIFY THIS ~~
## If you don't want more than one license statement for now, just
## delete this second statement test entirely. It's OK to leave the
## comma before it.
os.path.join(os.path.dirname(os.path.abspath(__file__)), "resources", "pmed.1001008.html") :
{
"id" : None, # there should be no id field
"version": "", # version should be the empty string
"type": "plos-who",
"jurisdiction": "", # jurisdiction should be the empty string
"open_access": False,
"BY": True,
# "NC": None, # can't test for None values atm, limitation of the way tests are defined/run atm
"SA": False,
"ND": False,
"provenance": {
"handler": 'plos', # name of plugin which processed this record
"handler_version": '0.1', # version of plugin which processed this record
"category": "page_scrape", # category is page_scrape
"description": '''License decided by scraping the resource at http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.1001008 and looking for the following license statement: "This is an Open Access article in the spirit of the Public Library of Science (PLoS) principles for Open Access http://www.plos.org/oa/, without any waiver of WHO's privileges and immunities under international law, convention, or agreement. This article should not be reproduced for use in association with the promotion of commercial products, services, or any legal entity. There should be no suggestion that WHO endorses any specific organization or products. The use of the WHO logo is not permitted. This notice should be preserved along with the article's original URL.".''', # description is a long string
"agent": config.agent, # agent is from configuration
"source": "http://www.plosmedicine.org/article/info:doi/10.1371/journal.pmed.1001008", # source is the url where we look this record up
"date": -1 # date is not null (but we don't know the exact value)
}
}, ## Just add a comma and copy the whole record to add a test for a
## *third* license statement test
}
"""
Full specification of the OAG licence object, as taken from the API documentation:
{
"status": "active",
"maintainer": "",
"description": "",
"family": ""
"title": "Creative Commons Attribution",
"domain_data": true/false,
"url": "http://www.opendefinition.org/licenses/cc-by",
"version": "",
"domain_content": true/false,
"is_okd_compliant": true/false,
"is_osi_compliant": true/false,
"domain_software": true/false,
"type": "cc-by",
"jurisdiction": "",
"open_access": true/false,
"BY": true/false,
"NC": true/false,
"ND": true/false,
"SA": true/false,
"provenance": {
"category": "page_scrape",
"description": "how the content was acquired ...",
"agent": "OpenArticleGauge Service/0.1 alpha",
"source": "http://www.plosbiology.org/article/info%3Adoi%2F10...",
"date": "2013-02-16T21:51:54.669040"
}
}
"""
################################################################################
CURRENT_REQUEST = None
# Mock response object for the requests library. If your provider does anything other
# than look at the http response and the page content you will need to extend this
class MockResponse():
def __init__(self):
self.status_code = None
self.text = None
self.content = None
self.url = None
def mock_get(url, *args, **kwargs):
resp = MockResponse()
resp.status_code = 200
resp.url = CURRENT_REQUEST
resp.headers = {'content-length': 100}
for filename, obj in RESOURCE_AND_RESULT.iteritems():
if obj['provenance']['source'] == CURRENT_REQUEST:
with open(filename) as f:
resp.text = f.read()
break
resp.content = resp.text
def return_all_content(*args, **kwargs):
return resp.content
resp.iter_content = return_all_content
return resp
class TestProvider(TestCase):
def setUp(self):
global CURRENT_REQUEST
CURRENT_REQUEST = None
self.old_get = requests.get
requests.get = mock_get
def tearDown(self):
global CURRENT_REQUEST
CURRENT_REQUEST = None
requests.get = self.old_get
def test_01_supports_success(self):
p = MyPlugin()
for url in SUPPORTED_URLS:
assert p.supports({"url" : [url]})
def test_02_supports_fail(self):
p = MyPlugin()
for url in UNSUPPORTED_URLS:
assert not p.supports({"url" : [url]})
def test_03_resource_and_result(self):
global CURRENT_REQUEST
# go through each file and result object
for path, comparison in RESOURCE_AND_RESULT.iteritems():
# construct a request object, using the provenance/source url as the provider url
record = {}
record['bibjson'] = {}
record['provider'] = {}
record['provider']['url'] = [comparison['provenance']['source']]
record = models.MessageObject(record=record)
# set the current request so that the monkey patch knows how to respond
CURRENT_REQUEST = comparison['provenance']['source']
# run the plugin
p = MyPlugin()
p.license_detect(record)
record = record.record
# check if all the top-level keys were created
assert "bibjson" in record
assert "license" in record['bibjson']
assert record['bibjson']['license'] is not None
# The rules for the comparison licence object are:
            # - if a key has a value, the resulting object's value must match exactly
# - if a key has been omitted, it will not be tested
# - if a key's value is the empty string, the resulting object's key's value must be the empty string
# - if a key's value is None, the resulting object MUST NOT have the key or MUST be the empty string
# - if a key's value is -1, the resulting object MUST have the key
licence = record['bibjson']['license'][0]
for key, value in comparison.iteritems():
if key == "provenance":
# for better layout of code, let's do provenance separately
continue
if value is None:
# the resulting object MUST NOT have the key or MUST be the empty string
assert key not in licence or licence.get(key) == "", ((key, value), licence.get(key))
elif value == -1:
# the resulting object MUST have the key
assert key in licence, ((key, value), licence.get(key))
else:
# the resulting object must match the comparison object
assert value == licence.get(key), ((key, value), licence.get(key))
prov = licence.get("provenance", {})
for key, value in comparison.get("provenance", {}).iteritems():
if value is None:
                    # the resulting object MUST NOT have the key or MUST be the empty string
assert key not in prov or prov.get(key) == "", ((key, value), prov.get(key))
elif value == -1:
# the resulting object MUST have the key
assert key in prov, ((key, value), prov.get(key))
else:
# the resulting object must match the comparison object
assert value == prov.get(key), ((key, value), prov.get(key))
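# Illustrative only (not part of the original template): a hypothetical comparison
# licence object showing the None / -1 / empty-string semantics applied by test_03.
EXAMPLE_COMPARISON = {
    "type": "cc-by",        # has a value: the result must match it exactly
    "version": "",          # empty string: the result's value must be the empty string
    "jurisdiction": None,   # None: the key must be absent, or its value the empty string
    "open_access": -1,      # -1: the key merely has to be present in the result
}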
|
bsd-3-clause
|
tpetmanson/pfe
|
pypfe/corpedit.py
|
2
|
2341
|
# -*- coding: utf-8 -*-
# Python 2.7
# Pattern based fact extraction library.
# Copyright (C) 2013 University of Tartu
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from corpus import *
import cherrypy
import sys
import shelve
import json
class CorpEdit(object):
def index(self):
cherrypy.response.headers['Content-Type']= 'text/html'
f = open('static/corpedit.html')
html = f.read()
f.close()
return html
def corpedit_js(self):
cherrypy.response.headers['Content-Type']= 'text/javascript'
f = open('static/corpedit.js')
js = f.read()
f.close()
return js
    def ajaxloader_gif(self):
        cherrypy.response.headers['Content-Type']= 'image/gif'
        # the GIF is binary data, so read it in binary mode
        f = open('static/ajaxloader.gif', 'rb')
        data = f.read()
        f.close()
        return data
def style_css(self):
cherrypy.response.headers['Content-Type']= 'text/css'
f = open('static/style.css')
js = f.read()
f.close()
return js
    def get_series_names(self):
        cherrypy.response.headers['Content-Type']= 'application/json'
        # shelve handles are file-backed, so close them once the names are read
        c = shelve.open(path)
        try:
            if len(c) > 0:
                return json.dumps(list(c[c.keys()[0]].columns))
            return json.dumps([])
        finally:
            c.close()
index.exposed = True
corpedit_js.exposed = True
get_series_names.exposed = True
ajaxloader_gif.exposed = True
style_css.exposed = True
def do_main():
global path
if len(sys.argv) < 2:
raise Exception('You must provide the path of the corpus to serve')
path = sys.argv[1].strip()
cherrypy.quickstart(CorpEdit())
if __name__ == '__main__':
do_main()
|
gpl-3.0
|
openstack/nova
|
nova/virt/arch.py
|
7
|
1548
|
# Copyright 2018 VEXXHOST, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
ALPHA = 'alpha'
ARMV6 = 'armv6'
ARMV7 = 'armv7l'
ARMV7B = 'armv7b'
AARCH64 = 'aarch64'
CRIS = 'cris'
I686 = 'i686'
IA64 = 'ia64'
LM32 = 'lm32'
M68K = 'm68k'
MICROBLAZE = 'microblaze'
MICROBLAZEEL = 'microblazeel'
MIPS = 'mips'
MIPSEL = 'mipsel'
MIPS64 = 'mips64'
MIPS64EL = 'mips64el'
OPENRISC = 'openrisc'
PARISC = 'parisc'
PARISC64 = 'parisc64'
PPC = 'ppc'
PPCLE = 'ppcle'
PPC64 = 'ppc64'
PPC64LE = 'ppc64le'
PPCEMB = 'ppcemb'
S390 = 's390'
S390X = 's390x'
SH4 = 'sh4'
SH4EB = 'sh4eb'
SPARC = 'sparc'
SPARC64 = 'sparc64'
UNICORE32 = 'unicore32'
X86_64 = 'x86_64'
XTENSA = 'xtensa'
XTENSAEB = 'xtensaeb'
ALL = (
ALPHA, ARMV6, ARMV7, ARMV7B,
AARCH64, CRIS, I686, IA64, LM32,
M68K, MICROBLAZE, MICROBLAZEEL, MIPS, MIPSEL,
MIPS64, MIPS64EL, OPENRISC, PARISC, PARISC64,
PPC, PPCLE, PPC64, PPC64LE, PPCEMB,
S390, S390X, SH4, SH4EB, SPARC,
SPARC64, UNICORE32, X86_64, XTENSA, XTENSAEB,
)
|
apache-2.0
|
batxes/4Cin
|
Six_mouse_models/Six_mouse_models_final_output_0.2_-0.1_11000/Six_mouse_models47608.py
|
2
|
18219
|
import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((1876.28, 6110.33, 7879.68), (0, 1, 0), 846)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((227.419, 8151.12, 7693.99), (0.7, 0.7, 0.7), 846)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((686.681, 6743.94, 8807.41), (0.7, 0.7, 0.7), 846)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((973.579, 6359.38, 8010.24), (0.7, 0.7, 0.7), 846)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((1871.39, 6688.7, 7759.66), (0.7, 0.7, 0.7), 846)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1332.22, 5213.44, 7677.71), (0.7, 0.7, 0.7), 846)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((-719.433, 4545.17, 6962.57), (0.7, 0.7, 0.7), 846)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((1291.27, 4366.88, 8166.26), (0.7, 0.7, 0.7), 846)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((1607.07, 4364.77, 7909.8), (0.7, 0.7, 0.7), 846)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((1629.11, 3418.58, 7165.11), (0.7, 0.7, 0.7), 846)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((1946.6, 3179.7, 9164.61), (0, 1, 0), 846)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3075.27, 4964.83, 9258.6), (0.7, 0.7, 0.7), 846)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((1867.24, 3345.11, 9356.01), (0.7, 0.7, 0.7), 846)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2503.64, 3615.21, 7678.2), (0.7, 0.7, 0.7), 846)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2587.69, 3967.27, 8956.88), (0.7, 0.7, 0.7), 846)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((3900.11, 4484.82, 10442.2), (0.7, 0.7, 0.7), 846)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((3717.73, 2758.55, 9344.39), (0.7, 0.7, 0.7), 846)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3705.74, 3293.05, 9243.37), (0.7, 0.7, 0.7), 846)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((3801.25, 4697.25, 8630.43), (0.7, 0.7, 0.7), 846)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3401.17, 5526.87, 9421.63), (0.7, 0.7, 0.7), 846)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((3910.74, 4127.01, 8632.02), (0, 1, 0), 846)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((2921.08, 4504.44, 7300.73), (0.7, 0.7, 0.7), 846)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((3876.45, 6091.2, 8600.93), (0.7, 0.7, 0.7), 846)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4027.86, 5333.54, 8368.75), (0.7, 0.7, 0.7), 846)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((3889.17, 4084.62, 6456.66), (0.7, 0.7, 0.7), 846)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((4210.46, 6050.41, 6940.95), (0.7, 0.7, 0.7), 846)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4268.44, 6982.18, 8602.39), (0.7, 0.7, 0.7), 846)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4186.85, 5849.89, 8079.95), (0.7, 0.7, 0.7), 846)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4219.74, 4106.39, 8066.05), (0.7, 0.7, 0.7), 846)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((3980.26, 5579.11, 8479.7), (0.7, 0.7, 0.7), 846)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5131.93, 5218.48, 7759.74), (0, 1, 0), 846)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6168.2, 5989.54, 6879.72), (0.7, 0.7, 0.7), 846)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((5350.45, 6174.25, 7189.09), (0.7, 0.7, 0.7), 846)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((4073.88, 5598.26, 7995.73), (0.7, 0.7, 0.7), 846)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((3813.74, 5660.91, 6369.33), (0.7, 0.7, 0.7), 846)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5023.88, 6538.03, 5901.11), (0.7, 0.7, 0.7), 846)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((3642.74, 4156.47, 5034.25), (1, 0.7, 0), 846)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4149.54, 6748.56, 5448.76), (0.7, 0.7, 0.7), 846)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((5139.49, 5643.52, 5333.18), (0.7, 0.7, 0.7), 846)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((6142.99, 7792.85, 5104.74), (1, 0.7, 0), 846)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((6439.45, 5704.27, 4773.46), (0.7, 0.7, 0.7), 846)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((7813.63, 6386.33, 4252.42), (0.7, 0.7, 0.7), 846)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((7465.84, 6746.57, 4775.82), (0.7, 0.7, 0.7), 846)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7347.61, 6637.32, 4337.59), (0.7, 0.7, 0.7), 846)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7833.61, 7288.35, 4938.62), (0.7, 0.7, 0.7), 846)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7187.54, 7028.25, 4582.36), (0.7, 0.7, 0.7), 846)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((8317.37, 6779.6, 4237.43), (0.7, 0.7, 0.7), 846)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((8459.77, 7305.41, 4225.98), (0.7, 0.7, 0.7), 846)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8050.01, 7722.75, 3320.96), (0.7, 0.7, 0.7), 846)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6632.05, 6423.75, 2924.41), (0.7, 0.7, 0.7), 846)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((7240.17, 6387.12, 4555.67), (0.7, 0.7, 0.7), 846)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6453.31, 6920.64, 2899.16), (0, 1, 0), 846)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((9219.68, 5315.54, 4133.53), (0.7, 0.7, 0.7), 846)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((7558.04, 7051.85, 3988.4), (0.7, 0.7, 0.7), 846)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((7298.09, 7995.97, 3180.22), (0.7, 0.7, 0.7), 846)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((7792.95, 6767.38, 4140.42), (0.7, 0.7, 0.7), 846)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((6586.69, 6097.49, 4007.42), (0.7, 0.7, 0.7), 846)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((7800.33, 7751.03, 3512.28), (0.7, 0.7, 0.7), 846)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((7634.37, 5985.6, 3708.88), (0.7, 0.7, 0.7), 846)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((6592.54, 7467.84, 3163.19), (0.7, 0.7, 0.7), 846)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((7933.16, 7457.46, 1977.52), (0.7, 0.7, 0.7), 846)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((6390.27, 6121.56, 1799.2), (0, 1, 0), 846)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((5309.04, 4274.26, 1755.85), (0.7, 0.7, 0.7), 846)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((6970.24, 5305.47, 2029.22), (0.7, 0.7, 0.7), 846)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((9097.86, 5923.58, 2830.91), (0.7, 0.7, 0.7), 846)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((8185.36, 5950.73, 1114.4), (0.7, 0.7, 0.7), 846)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((9107.64, 6035.64, 1762.44), (0.7, 0.7, 0.7), 846)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((8603.33, 4832.35, 2967.6), (0.7, 0.7, 0.7), 846)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((9000.35, 3399.6, 1608.48), (0.7, 0.7, 0.7), 846)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((8798.82, 4298.63, 2243.69), (0.7, 0.7, 0.7), 846)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((9029.94, 5264.95, 608.973), (0.7, 0.7, 0.7), 846)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((8147.23, 4356.91, 2369.3), (0, 1, 0), 846)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((7928.77, 4544.22, 960.884), (0.7, 0.7, 0.7), 846)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((9323.85, 5016.54, 1821.91), (0.7, 0.7, 0.7), 846)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((7656.76, 4201.69, 1166.52), (0, 1, 0), 846)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
|
gpl-3.0
|