| column | dtype | values |
|---|---|---|
| repo_name | string | lengths 5 to 92 |
| path | string | lengths 4 to 221 |
| copies | string | 19 classes |
| size | string | lengths 4 to 6 |
| content | string | lengths 766 to 896k |
| license | string | 15 classes |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |

nke001/attention-lvcsr | libs/Theano/theano/gof/cc.py | mit

"""
Defines Linkers that deal with C implementations.
"""
from __future__ import print_function
# Python imports
from copy import copy
import os
import sys
import logging
import numpy
import theano
from theano import config
from theano.compat import PY3
from theano.compat import izip
from six import string_types, reraise
from six.moves import StringIO, xrange
# Note that we need to do this before importing cutils, since when there is
# no theano cache dir initialized yet, importing cutils may require compilation
# of cutils_ext.
from theano.configparser import AddConfigVar, StrParam
# gof imports
from theano.gof import graph
from theano.gof import link
from theano.gof import utils
from theano.gof import cmodule
from theano.gof.compilelock import get_lock, release_lock
from theano.gof.callcache import CallCache
AddConfigVar('gcc.cxxflags',
"Extra compiler flags for gcc",
StrParam(""))
_logger = logging.getLogger("theano.gof.cc")
run_cthunk = None # Will be imported only when needed.
def get_module_cache(init_args=None):
"""
:param init_args: If not None, the (k, v) pairs in this dictionary will
be forwarded to the ModuleCache constructor as keyword arguments.
"""
return cmodule.get_module_cache(config.compiledir, init_args=init_args)
_persistent_module_cache = None
def get_persistent_module_cache():
global _persistent_module_cache
if _persistent_module_cache is None:
_persistent_module_cache = CallCache(os.path.join(config.compiledir,
'persistent_cache'))
return _persistent_module_cache
class CodeBlock:
"""WRITEME
Represents a computation unit composed of declare, behavior, and cleanup.
@ivar declare: C code that declares variables for use by the computation
@ivar behavior: C code that performs the computation
@ivar cleanup: C code that cleans up things allocated or incref-ed
in behavior
"""
def __init__(self, declare, behavior, cleanup, sub):
"""
Initialize a L{CodeBlock} with templatized declare, behavior
and cleanup. The sub parameter will be used in the other
arguments' templates. sub should contain a key called 'id'
that maps to an identifier for this block.
The identifier will be used to determine the failure code and
a label to jump to. It should also contain a key called
'failure_var' that contains the name of the variable that
contains the error code.
"""
self.declare = declare
self.behavior = behavior
# the dummy is because gcc throws an error when a label's
# right next to a closing brace (maybe there's an ignore flag
# for that...)
# we need the label even if cleanup is empty because the
# behavior block jumps there on failure
self.cleanup = ("__label_%(id)i:\n" % sub + cleanup +
"\ndouble __DUMMY_%(id)i;\n" % sub) # % sub
def failure_code(sub):
"""Code contained in sub['fail'], usually substituted for %(fail)s.
It sets information about current error, then goto the code
actually handling the failure, which is defined in struct_gen().
"""
return '''{
%(failure_var)s = %(id)s;
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
goto __label_%(id)i;}''' % sub
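# For illustration: with sub = {'failure_var': '__failure', 'id': 5}, the
# template above expands to
#
#     {
#     __failure = 5;
#     if (!PyErr_Occurred()) {
#         PyErr_SetString(PyExc_RuntimeError,
#             "Unexpected error in an Op's C code. "
#             "No Python exception was set.");
#     }
#     goto __label_5;}
#
# i.e. the failing task records its id and jumps to its cleanup label, which
# is emitted by CodeBlock/struct_gen below.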
def failure_code_init(sub):
"Code for failure in the struct init."
return '''{
if (!PyErr_Occurred()) {
PyErr_SetString(PyExc_RuntimeError,
"Unexpected error in an Op's C code. "
"No Python exception was set.");
}
return %(id)d;
}''' % sub
def code_gen(blocks):
"""WRITEME From a list of L{CodeBlock} instances, returns a string
that executes them all in sequence. eg for C{(decl1, task1,
cleanup1)} and C{(decl2, task2, cleanup2)} the returned string
will be of the form::
decl1
decl2
{
task1
{
task2
cleanup2
}
cleanup1
}
"""
decl = ""
head = ""
tail = ""
for block in blocks:
decl += block.declare
head = head + ("\n{\n%s" % block.behavior)
tail = ("%s\n}\n" % block.cleanup) + tail
return decl + head + tail
def struct_gen(args, struct_builders, blocks, sub):
"""WRITEME
Generates a struct conforming to the following specifications:
     * args -> all of the PyObject* type, stored in the struct;
       they represent the storage and must be length-1 Python lists.
* struct_builders -> list of L{CodeBlock} instances such that
* declarations are in the struct
* behavior is in the constructor
* cleanup is in the destructor
* blocks -> list of CodeBlock instances such that
* declarations, behavior and cleanup are in the run()
method of the struct
* sub -> dictionary used to template the struct.
* failure_var -> must contain a variable name to use for
the failure code.
In a nutshell, this returns code for a struct that represents
a function with state. The state's initialization and destruction
are handled by struct_builders and the actual behavior of the
function is handled by blocks.
"""
struct_decl = ""
struct_init_head = ""
struct_init_tail = ""
struct_cleanup = ""
for block in struct_builders:
# decl are declarations that go in the struct
# init_head are in the constructor
# init_tail and cleanup do the same thing, but the former will
# be executed if any step in the constructor fails and the
# latter only at destruction time.
struct_decl += block.declare
struct_init_head = struct_init_head + ("\n%s" % block.behavior)
struct_cleanup += block.cleanup
behavior = code_gen(blocks)
# declares the storage
storage_decl = "\n".join(["PyObject* %s;" % arg for arg in args])
# in the constructor, sets the storage to the arguments
storage_set = "\n".join(["this->%s = %s;" % (arg, arg) for arg in args])
# increments the storage's refcount in the constructor
storage_incref = "\n".join(["Py_XINCREF(%s);" % arg for arg in args])
# decrements the storage's refcount in the destructor
storage_decref = "\n".join(["Py_XDECREF(this->%s);" % arg for arg in args])
args_names = ", ".join(args)
args_decl = ", ".join(["PyObject* %s" % arg for arg in args])
# The following code stores the exception data in __ERROR, which
# is a special field of the struct. __ERROR is a list of length 3
# that holds the type, the value and the traceback. After storing
# the error, we return the failure code so we know which code
# block failed.
do_return = """
if (%(failure_var)s) {
// When there is a failure, this code puts the exception
// in __ERROR.
PyObject* err_type = NULL;
PyObject* err_msg = NULL;
PyObject* err_traceback = NULL;
PyErr_Fetch(&err_type, &err_msg, &err_traceback);
if (!err_type) {err_type = Py_None;Py_INCREF(Py_None);}
if (!err_msg) {err_msg = Py_None; Py_INCREF(Py_None);}
if (!err_traceback) {err_traceback = Py_None; Py_INCREF(Py_None);}
PyObject* old_err_type = PyList_GET_ITEM(__ERROR, 0);
PyObject* old_err_msg = PyList_GET_ITEM(__ERROR, 1);
PyObject* old_err_traceback = PyList_GET_ITEM(__ERROR, 2);
PyList_SET_ITEM(__ERROR, 0, err_type);
PyList_SET_ITEM(__ERROR, 1, err_msg);
PyList_SET_ITEM(__ERROR, 2, err_traceback);
{Py_XDECREF(old_err_type);}
{Py_XDECREF(old_err_msg);}
{Py_XDECREF(old_err_traceback);}
}
// The failure code is returned to index what code block failed.
return %(failure_var)s;
""" % sub
sub = dict(sub)
sub.update(locals())
# TODO: add some error checking to make sure storage_<x> are
# 1-element lists and __ERROR is a 3-elements list.
struct_code = """
namespace {
struct %(name)s {
PyObject* __ERROR;
%(storage_decl)s
%(struct_decl)s
%(name)s() {}
~%(name)s(void) {
cleanup();
}
int init(PyObject* __ERROR, %(args_decl)s) {
%(storage_incref)s
%(storage_set)s
%(struct_init_head)s
this->__ERROR = __ERROR;
return 0;
}
void cleanup(void) {
%(struct_cleanup)s
%(storage_decref)s
}
int run(void) {
int %(failure_var)s = 0;
%(behavior)s
%(do_return)s
}
};
}
""" % sub
return struct_code
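# The generated struct is driven from the C side (see
# CLinker.instantiate_code below) roughly as:
#
#     my_struct* s = new my_struct();
#     s->init(__ERROR, storage_V1, storage_V3, ...);  // constructor-time blocks
#     ...
#     s->run();    // per-call blocks; returns 0 or the failing task's id
#     ...
#     delete s;    // runs cleanup()
#
# Here my_struct and the storage_* names stand in for the generated
# identifiers.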
# The get_<x> functions complete the return value of r.get_<x>()
# with handling of the py_<name> variable.
def get_nothing(r, name, sub):
"""WRITEME"""
return ""
def get_c_declare(r, name, sub):
"""Wrapper around c_declare that declares py_name"""
# The declaration will be used by the Apply node that
# is computing it (`r.owner`), and by each of the clients.
# If some of these have `check_input=True` in their `.op`,
# it means they need `r`'s dtype to be declared, so
# we have to pass `check_input=True` to `c_declare`.
if ((any([getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
if not isinstance(c, string_types)]) or
(r.owner and
getattr(r.owner.op, 'check_input', config.check_input)))):
c_declare = r.type.c_declare(name, sub, True)
else:
c_declare = r.type.c_declare(name, sub, False)
pre = """
PyObject* py_%(name)s;
""" % locals()
return pre + c_declare
def get_c_init(r, name, sub):
"""Wrapper around c_init that initializes py_name to Py_None"""
pre = "" """
py_%(name)s = Py_None;
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + r.type.c_init(name, sub)
def get_c_extract(r, name, sub):
"""Wrapper around c_extract that initializes py_name from storage."""
# `c_extract` is called when getting the value of an apply node's
# input from the compute map, before being used by its clients.
# If one of the clients has `check_input=True`, we need to perform
# checks on the variable.
# However that code is not used by C code of the apply node creating
# this variable, so there is no need to check `r.owner.op.check_input`.
if any([getattr(c.op, 'check_input', config.check_input)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
        # check_broadcast is just a hack to easily remove the broadcast
        # check on the old GPU back-end. This check isn't done in the
        # new GPU back-end or on the CPU.
if any([getattr(c.op, 'check_broadcast', True)
for (c, _) in r.clients
if not isinstance(c, string_types)]):
c_extract = r.type.c_extract(name, sub, True)
else:
try:
c_extract = r.type.c_extract(
name, sub, True,
check_broadcast=False)
except TypeError as e:
c_extract = r.type.c_extract(name, sub, True)
else:
c_extract = r.type.c_extract(name, sub, False)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
def get_c_extract_out(r, name, sub):
"""Wrapper around c_extract_out that initializes py_name from storage."""
# `c_extract_out` is used to extract an output variable from
# the compute map, to be used as pre-allocated memory for `r`
# before its value gets computed.
    # If the node producing `r` has `check_input=True`, it may
# also perform type checks on the initial value of the output,
# so we need to pass `check_input=True` to `c_extract_out`.
# However, that code is not used by potential clients of `r`,
# so we do not need to check them.
check_input = getattr(r.owner.op, 'check_input', config.check_input)
    # check_broadcast is just a hack to easily remove the broadcast
    # check on the old GPU back-end. This check isn't done in the
    # new GPU back-end or on the CPU.
if getattr(r.owner.op, 'check_broadcast', True):
c_extract = r.type.c_extract_out(name, sub, check_input)
else:
try:
c_extract = r.type.c_extract_out(name, sub, check_input,
check_broadcast=False)
except TypeError as e:
c_extract = r.type.c_extract_out(name, sub, check_input)
pre = """
py_%(name)s = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
""" % locals()
return pre + c_extract
def get_c_cleanup(r, name, sub):
"""Wrapper around c_cleanup that decrefs py_name"""
post = """
{Py_XDECREF(py_%(name)s);}
""" % locals()
return r.type.c_cleanup(name, sub) + post
def get_c_sync(r, name, sub):
"""Wrapper around c_sync that syncs py_name with storage."""
return """
if (!%(failure_var)s) {
%(sync)s
PyObject* old = PyList_GET_ITEM(storage_%(name)s, 0);
{Py_XINCREF(py_%(name)s);}
PyList_SET_ITEM(storage_%(name)s, 0, py_%(name)s);
{Py_XDECREF(old);}
}
""" % dict(sync=r.type.c_sync(name, sub), name=name, **sub)
def apply_policy(policy, r, name, sub):
"""WRITEME
@param policy: list of functions that map a L{Variable} to a string,
or a single such function
@type r: L{Variable}
@return: C{policy[0](r) + policy[1](r) + ...}
"""
if isinstance(policy, (list, tuple)):
ret = ""
for sub_policy in policy:
ret += sub_policy(r, name, sub)
return ret
return policy(r, name, sub)
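# Example: apply_policy((get_c_declare, get_c_init), r, name, sub) simply
# concatenates get_c_declare(r, name, sub) and get_c_init(r, name, sub),
# while apply_policy(get_nothing, r, name, sub) returns "".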
def struct_variable_codeblocks(variable, policies, id, symbol_table, sub):
"""WRITEME
variable -> a Variable
policies -> a pair of tuples ((declare_policy, behavior_policy,
cleanup_policy), -- at construction
(declare_policy, behavior_policy,
cleanup_policy)) -- at execution
the first list will produce an element of the
'struct_builders' argument in struct_gen the second
list will produce an element of the 'blocks' argument
in struct_gen
id -> the id assigned to this variable's task in the computation
symbol_table -> a dict that maps variables to variable names. It
is not read by this function but a variable name for the
variable is computed and added to the table.
sub -> dictionary for use by L{CodeBlock}.
"""
name = "V%i" % id
symbol_table[variable] = name
sub = dict(sub)
# sub['name'] = name
sub['id'] = id
sub['fail'] = failure_code_init(sub)
sub['py_ptr'] = "py_%s" % name
sub['stor_ptr'] = "storage_%s" % name
# struct_declare, struct_behavior, struct_cleanup, sub)
struct_builder = CodeBlock(*[apply_policy(policy, variable, name, sub)
for policy in policies[0]] + [sub])
sub['id'] = id + 1
sub['fail'] = failure_code(sub)
sub['py_ptr'] = "py_%s" % name
sub['stor_ptr'] = "storage_%s" % name
# run_declare, run_behavior, run_cleanup, sub)
block = CodeBlock(*[apply_policy(policy, variable, name, sub)
for policy in policies[1]] + [sub])
return struct_builder, block
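# For a variable handed id=3, this therefore produces one CodeBlock templated
# with id 3 (constructor/destructor code for the struct) and one templated
# with id 4 (per-call extract/cleanup code). CLinker.code_gen below relies on
# this "two ids per variable" numbering when mapping failure codes to tasks.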
class CLinker(link.Linker):
"""WRITEME
Creates C code for an fgraph, compiles it and returns callables
through make_thunk and make_function that make use of the compiled
code.
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it during the computation (to avoid reusing it).
"""
def __init__(self, schedule=None):
self.fgraph = None
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
"""WRITEME"""
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)().accept(fgraph, no_recycling)
# raise Exception("Cannot accept from a Linker that is already"
# " tied to another FunctionGraph.")
self.fgraph = fgraph
self.fetch_variables()
self.no_recycling = no_recycling
return self
def fetch_variables(self):
"""WRITEME
Fills the inputs, outputs, variables, orphans,
temps and node_order fields.
"""
fgraph = self.fgraph
self.inputs = fgraph.inputs
self.outputs = fgraph.outputs
self.node_order = self.schedule(fgraph)
# list(fgraph.variables)
# We need to include the unused inputs in our variables,
# otherwise we can't pass them to the module.
self.variables = [var for var in self.inputs if not len(var.clients)]
self.variables += graph.variables(self.inputs, self.outputs)
# This adds a hidden input which is the context for each node
# that needs it
self.contexts = dict()
for node in self.node_order:
ctx = node.run_context()
if ctx is not graph.NoContext:
# try to avoid creating more than one variable for the
# same context.
if ctx in self.contexts:
var = self.contexts[ctx]
assert var.type == node.context_type
var.clients.append((node, 'context'))
else:
var = graph.Constant(node.context_type, ctx)
var.clients = [(node, 'context')]
self.contexts[ctx] = var
self.variables.append(var)
# The orphans field is listified to ensure a consistent order.
# list(fgraph.orphans.difference(self.outputs))
self.orphans = list(r for r in self.variables
if isinstance(r, graph.Constant) and
r not in self.inputs)
self.temps = list(set(self.variables).difference(
self.inputs).difference(self.outputs).difference(self.orphans))
self.consts = []
def code_gen(self):
"""WRITEME
Generates code for a struct that does the computation of the fgraph and
stores it in the struct_code field of the instance.
If reuse_storage is True, outputs and temporaries will be stored in
the struct so they can be reused each time a function returned by
make_function is called, which means that the output of a call will
be invalidated by the next. If reuse_storage is False, that problem
is avoided.
This method caches its computations.
"""
if getattr(self, 'struct_code', False):
return self.struct_code
no_recycling = self.no_recycling
self.consts = []
c_support_code_apply = []
c_init_code_apply = []
symbol = {}
# (init_)tasks contains a list of pairs (Op/Variable, task_name)
# e.g. (x, 'get') or (x+y, 'code')
init_tasks = []
tasks = []
# (init_)blocks contain CodeBlock instances. There is a direct
# correspondance with (init_)tasks.
init_blocks = []
blocks = []
failure_var = "__failure"
id = 1
for variable in self.variables:
sub = dict(failure_var=failure_var)
# it might be possible to inline constant variables as C literals
# policy = [[what to declare in the struct,
# what to do at construction,
# what to do at destruction],
# [what to declare in each run,
# what to do at the beginning of each run,
# what to do at the end of each run]]
if variable in self.inputs:
# We need to extract the new inputs at each run
# they do not need to be relayed to Python, so we don't sync.
# If the variable is both an input and an output, there is
# no need to synchronize either, it is already up-to-date.
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_extract, get_c_cleanup]]
elif variable in self.orphans:
if not isinstance(variable, graph.Constant):
raise TypeError("All orphans to CLinker must be Constant"
" instances.", variable)
if isinstance(variable, graph.Constant):
try:
symbol[variable] = ("(" + variable.type.c_literal(
variable.data) + ")")
self.consts.append(variable)
self.orphans.remove(variable)
continue
except (utils.MethodNotDefined, NotImplementedError):
pass
                # orphans are not inputs, so we just fetch them when we
                # initialize the struct and assume they stay the same
policy = [[get_c_declare, get_c_extract, get_c_cleanup],
[get_nothing, get_nothing, get_nothing]]
elif variable in self.temps:
                # temps don't need to be extracted from Python, so we
                # call c_init rather than c_extract; they do not need
                # to be relayed to Python, so we don't sync
if variable.type.c_is_simple() or variable in no_recycling:
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_init, get_c_cleanup]]
else:
# it is useful for complex temps to reuse storage
# at each run, so we only clean up in the
# destructor
policy = [[get_c_declare, get_c_init, get_c_cleanup],
[get_nothing, get_nothing, get_nothing]]
elif variable in self.outputs:
if variable.type.c_is_simple() or variable in no_recycling:
# Do not extract output from Python
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_init,
(get_c_sync, get_c_cleanup)]]
else:
# We try to use the output that is pre-allocated.
# The linker will usually just reuse the storage
# from last run, but in the first execution,
# it will be None.
# We clean-up at each run to enable garbage collection
# in the Linker.
policy = [[get_nothing, get_nothing, get_nothing],
[get_c_declare, get_c_extract_out,
(get_c_sync, get_c_cleanup)]]
else:
raise Exception("what the fuck")
builder, block = struct_variable_codeblocks(variable, policy,
id, symbol, sub)
# each Variable generates two CodeBlocks, one to
# declare/initialize/destroy struct variables and the
# other to declare/extract/cleanup each time the function
# is run.
# Typically, only one of the two actually does anything
# (see all the possible combinations above)
init_tasks.append((variable, 'init', id))
init_blocks.append(builder)
tasks.append((variable, 'get', id + 1))
blocks.append(block)
id += 2
for node_num, node in enumerate(self.node_order):
sub = dict(failure_var=failure_var)
ctx = node.run_context()
if ctx is not graph.NoContext:
context_var = symbol[self.contexts[ctx]]
# The placeholder will be replaced by a hash of the entire
# code (module + support code) in DynamicModule.code.
# This ensures that, when defining functions in support code,
# we cannot have two different functions, in different modules,
# that have the same name.
# It was problematic, in particular, on Mac OS X (10.6 and 10.7)
# when defining CUDA kernels (with Cuda 4.2 and 5.0). See gh-1172.
name = "node_<<<<HASH_PLACEHOLDER>>>>_%i" % node_num
isyms = [symbol[r] for r in node.inputs]
osyms = [symbol[r] for r in node.outputs]
# Make the CodeBlock for c_code
sub['id'] = id
sub['fail'] = failure_code(sub)
if ctx is not graph.NoContext:
sub['context'] = context_var
sub_struct = dict()
sub_struct['id'] = id + 1
sub_struct['fail'] = failure_code_init(sub)
if ctx is not graph.NoContext:
# Since context inputs are always constants they are
# guaranteed to be available in the struct init code.
sub_struct['context'] = context_var
struct_support = ""
struct_init = ""
struct_cleanup = ""
op = node.op
# type-specific support code
try:
c_support_code_apply.append(op.c_support_code_apply(node,
name))
except utils.MethodNotDefined:
pass
else:
# The following will be executed if the "try" block succeeds
assert isinstance(c_support_code_apply[-1], string_types), (
str(node.op) +
" didn't return a string for c_support_code_apply")
try:
c_init_code_apply.append(op.c_init_code_apply(node, name))
except utils.MethodNotDefined:
pass
else:
assert isinstance(c_init_code_apply[-1], string_types), (
str(node.op) +
" didn't return a string for c_init_code_apply")
try:
struct_init = op.c_init_code_struct(node, name, sub_struct)
assert isinstance(struct_init, string_types), (
str(node.op) +
" didn't return a string for c_init_code_struct")
except utils.MethodNotDefined:
pass
try:
struct_support = op.c_support_code_struct(node, name)
assert isinstance(struct_support, string_types), (
str(node.op) +
" didn't return a string for c_support_code_struct")
except utils.MethodNotDefined:
pass
try:
struct_cleanup = op.c_cleanup_code_struct(node, name)
assert isinstance(struct_cleanup, string_types), (
str(node.op) +
" didn't return a string for c_cleanup_code_struct")
except utils.MethodNotDefined:
pass
# emit c_code
try:
behavior = op.c_code(node, name, isyms, osyms, sub)
except utils.MethodNotDefined:
raise NotImplementedError("%s cannot produce C code" % op)
assert isinstance(behavior, string_types), (
str(node.op) + " didn't return a string for c_code")
            # Prepend the Op class name as a comment: it makes the
            # generated C code easier to read, and it also prevents two
            # different Ops that happen to generate the same C code from
            # being merged (which should not happen anyway).
behavior = ("// Op class " + node.op.__class__.__name__ + "\n" +
behavior)
try:
cleanup = op.c_code_cleanup(node, name, isyms, osyms, sub)
except utils.MethodNotDefined:
cleanup = ""
_logger.info('compiling un-versioned Apply %s', str(node))
blocks.append(CodeBlock("", behavior, cleanup, sub))
tasks.append((node, 'code', id))
id += 1
init_blocks.append(CodeBlock(struct_support, struct_init,
struct_cleanup, {'id': id}))
init_tasks.append((node, 'init', id))
id += 1
# List of arg names for use in struct_gen. Note the call to
# uniq: duplicate inputs must only be passed once because they
# are mapped to the same name. Duplicates are defined by (a
# is b), rather than (a==b) since Constant instances can
# compare equal to equivalent Constant instances.
args = []
args += ["storage_%s" % symbol[variable] for variable
in utils.uniq(self.inputs + self.outputs + self.orphans)]
# <<<<HASH_PLACEHOLDER>>>> will be replaced by a hash of the whole
# code in the file, including support code, in DynamicModule.code.
struct_name = '__struct_compiled_op_%s' % '<<<<HASH_PLACEHOLDER>>>>'
struct_code = struct_gen(args, init_blocks, blocks,
dict(failure_var=failure_var,
name=struct_name))
self.struct_code = struct_code
self.struct_name = struct_name
self.args = args
self.r2symbol = symbol
self.init_blocks = init_blocks
self.init_tasks = init_tasks
self.blocks = blocks
self.tasks = tasks
all_info = self.inputs + self.outputs + self.orphans
self.c_support_code_apply = c_support_code_apply
self.c_init_code_apply = c_init_code_apply
if (self.init_tasks, self.tasks) != self.get_init_tasks():
print("init_tasks\n", self.init_tasks, file=sys.stderr)
print(self.get_init_tasks()[0], file=sys.stderr)
print("tasks\n", self.tasks, file=sys.stderr)
print(self.get_init_tasks()[1], file=sys.stderr)
assert (self.init_tasks, self.tasks) == self.get_init_tasks()
# List of indices that should be ignored when passing the arguments
# (basically, everything that the previous call to uniq eliminated)
self.dupidx = [i for i, x in enumerate(all_info)
if all_info.count(x) > 1 and all_info.index(x) != i]
return self.struct_code
def support_code(self):
"""WRITEME
Returns a list of support code strings that are needed by
one or more Variables or Ops. The support code from Variables is
added before the support code from Ops.
This might contain duplicates.
"""
ret = []
# generic support code
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret.append(x.c_support_code())
except utils.MethodNotDefined:
pass
return ret
def compile_args(self):
"""WRITEME
Returns a list of compile args that are needed by one
or more Variables or Ops.
This might contain duplicates.
"""
ret = ["-O3"]
        # These are the parameters that -ffast-math activates. They are
        # listed explicitly because FillMissing must disable some of them;
        # passing -ffast-math itself would make it impossible to disable
        # the others individually.
ret += ["-fno-math-errno",
# "-funsafe-math-optimizations",
# "-fno-signaling-nans",
# "-fcx-limited-range",
# "-fno-rounding-math",
# "-ffinite-math-only",
                # The generated code emits labels even when they are not
                # used. We could use a gcc attribute on those labels only.
                "-Wno-unused-label",
                "-Wno-unused-variable",  # same reason as the flag above
"-Wno-write-strings", # generated by our code generator...
]
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_compile_args()
except utils.MethodNotDefined:
pass
c_compiler = self.c_compiler()
ret = utils.uniq(ret) # to remove duplicate
# The args set by the compiler include the user flags. We do not want
# to reorder them
ret += c_compiler.compile_args()
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
for i in x.c_no_compile_args():
try:
ret.remove(i)
except ValueError:
pass # in case the value is not there
except utils.MethodNotDefined:
pass
return ret
def headers(self):
"""WRITEME
Returns a list of headers that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_headers()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def init_code(self):
"""
Return a list of code snippets that have to be inserted
in the module initialization code.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_init_code()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def c_compiler(self):
c_compiler = None
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
if hasattr(x, 'c_compiler'):
x_compiler = x.c_compiler()
else:
continue
if c_compiler is None:
c_compiler = x_compiler
else:
if x_compiler and (x_compiler != c_compiler):
raise Exception('Nodes have requested specific'
' different compilers',
(c_compiler, x_compiler))
if (c_compiler is None):
return cmodule.GCC_compiler
else:
return c_compiler
def header_dirs(self):
"""WRITEME
Returns a list of lib directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_header_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def libraries(self):
"""WRITEME
Returns a list of libraries that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_libraries()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def lib_dirs(self):
"""WRITEME
Returns a list of lib directories that are needed by one
or more Types or Ops.
The return value will not contain duplicates.
"""
ret = []
for x in [y.type for y in self.variables] + [
y.op for y in self.node_order]:
try:
ret += x.c_lib_dirs()
except utils.MethodNotDefined:
pass
return utils.uniq(ret)
def __compile__(self, input_storage=None,
output_storage=None, keep_lock=False):
"""WRITEME
Compiles this linker's fgraph.
@type input_storage: list or None
@param input_storage: list of lists of length 1. In order to use
the thunk returned by __compile__, the inputs must be put in
that storage. If None, storage will be allocated.
@param output_storage: list of lists of length 1. The thunk returned
by __compile__ will put the variables of the computation in these
lists. If None, storage will be allocated.
Returns: thunk, input_storage, output_storage, error_storage
"""
error_storage = [None, None, None]
if input_storage is None:
input_storage = tuple([None] for variable in self.inputs)
if output_storage is None:
map = {}
output_storage = []
# Initialize the map with the inputs, as some outputs may
# be inputs as well.
for i, variable in enumerate(self.inputs):
map[variable] = input_storage[i]
for variable in self.outputs:
if variable not in map:
map[variable] = [None]
output_storage.append(map[variable])
input_storage = tuple(input_storage)
output_storage = tuple(output_storage)
thunk = self.cthunk_factory(error_storage,
input_storage,
output_storage,
keep_lock=keep_lock)
return (thunk,
[link.Container(input, storage) for input, storage in
izip(self.fgraph.inputs, input_storage)],
[link.Container(output, storage, True) for output, storage in
izip(self.fgraph.outputs, output_storage)],
error_storage)
def get_init_tasks(self):
init_tasks = []
tasks = []
id = 1
for v in self.variables:
if v in self.consts:
continue
if v in self.orphans and isinstance(v, graph.Constant):
try:
# constant will be inlined, no need to get
v.type.c_literal(v.data)
continue
except (utils.MethodNotDefined, NotImplementedError):
pass
init_tasks.append((v, 'init', id))
tasks.append((v, 'get', id + 1))
id += 2
for node in self.node_order:
tasks.append((node, 'code', id))
init_tasks.append((node, 'init', id + 1))
id += 2
return init_tasks, tasks
def make_thunk(self, input_storage=None, output_storage=None,
keep_lock=False):
"""WRITEME
Compiles this linker's fgraph and returns a function to perform the
computations, as well as lists of storage cells for both the
inputs and outputs.
@type input_storage: list or None
@param input_storage: list of lists of length 1. In order to use
the thunk returned by __compile__, the inputs must be put in
that storage. If None, storage will be allocated.
@param output_storage: list of lists of length 1. The thunk returned
by __compile__ will put the variables of the computation in these
lists. If None, storage will be allocated.
Returns: thunk, input_storage, output_storage
The return values can be used as follows:
f, istor, ostor = clinker.make_thunk()
istor[0].data = first_input
istor[1].data = second_input
f()
first_output = ostor[0].data
"""
init_tasks, tasks = self.get_init_tasks()
cthunk, in_storage, out_storage, error_storage = self.__compile__(
input_storage, output_storage,
keep_lock=keep_lock)
res = _CThunk(cthunk, init_tasks, tasks, error_storage)
res.nodes = self.node_order
return res, in_storage, out_storage
def cmodule_key(self):
"""Return a complete hashable signature of the module we compiled.
This function must have the property that no two programs that
compute different things yield the same key.
The key returned by this function is of the form (version, signature)
The signature has the following form:
{{{
'CLinker.cmodule_key', compilation args, libraries,
header_dirs, numpy ABI version, config md5,
(op0, input_signature0, output_signature0),
(op1, input_signature1, output_signature1),
...
(opK, input_signatureK, output_signatureK),
}}}
The signature is a tuple, some elements of which are sub-tuples.
The outer tuple has a brief header, containing the compilation options
passed to the compiler, the libraries to link against, an md5 hash
of theano.config (for all config options where "in_c_key" is True).
It is followed by elements for every node in the
topological ordering of `self.fgraph`.
If the Op of any Apply in the FunctionGraph does not have
c_code_cache_ok()==True, then this function raises a KeyError
exception.
Input Signature
---------------
Each input signature is a tuple with an element for each input
to the corresponding Apply node. Each element identifies the
type of the node input, and the nature of that input in the
graph.
The nature of a typical variable is encoded by integer pairs
``((a,b),c)``:
``a`` is the topological position of the input's owner
(-1 for graph inputs),
``b`` is the index of the variable in the owner's output list.
``c`` is a flag indicating whether the variable is in the
no_recycling set.
If a variable is also a graph output, then its position in the
outputs list is also bundled with this tuple (after the b).
The nature of a Constant instance is defined as its signature,
together with two integers: the topological position of the
first Apply using that Constant instance, and the lowest index
into that Apply's inputs that refers to that Constant. (These
two integers are a surrogate for the id() of the Constant.
The integers are important because merge-able constants have
the same signature, but require separate containers in C
code.) The membership in no_recycling is also included in the
signature.
Output Signature
----------------
The outputs of a node are entirely determined by the node's Op
and the nature of the inputs, but the set of outputs that may
be re-used by the computation (the elements of
self.no_recycling) can affect the code that is generated.
The format of each Op's output signature is a (version, no_recycle)
pair, where version is incremented if codegen() changes how it
handles the outputs, and no_recycle is simply a list of
booleans, indicating whether each output is in the
no_recycling set. Older versions of compiled modules only have the
no_recycle list.
"""
return self.cmodule_key_(self.fgraph, self.no_recycling,
compile_args=self.compile_args(),
libraries=self.libraries(),
header_dirs=self.header_dirs(),
c_compiler=self.c_compiler(),
)
def cmodule_key_(self, fgraph, no_recycling, compile_args=None,
libraries=None, header_dirs=None, insert_config_md5=True,
c_compiler=None):
"""
Do the actual computation of cmodule_key in a static method
to allow it to be reused in scalar.Composite.__eq__
"""
if compile_args is None:
compile_args = []
if libraries is None:
libraries = []
if header_dirs is None:
header_dirs = []
order = self.schedule(fgraph)
# set of variables that have been computed by nodes we have
# seen 'so far' in the loop below
fgraph_computed_set = set()
fgraph_inputs_dict = dict((i, (-1, pos)) for pos, i in
enumerate(fgraph.inputs))
constant_ids = dict()
op_pos = {} # Apply -> topological position
# First we put the header, compile_args, library names and config md5
# into the signature.
sig = ['CLinker.cmodule_key'] # will be cast to tuple on return
if compile_args is not None:
# We must sort it as the order from a set is not guaranteed.
# In particular, 2 sets with the same content can give different
# order depending on the order you put data in it.
# Sets are used to remove duplicate elements.
args = sorted(compile_args)
args = tuple(args)
sig.append(args)
if libraries is not None:
# see comments for compile_args
args = sorted(libraries)
args = tuple(args)
sig.append(args)
if header_dirs is not None:
args = sorted(header_dirs)
args = tuple(args)
sig.append(args)
# We must always add the numpy ABI version here as
# DynamicModule always add the include <numpy/arrayobject.h>
sig.append('NPY_ABI_VERSION=0x%X' %
numpy.core.multiarray._get_ndarray_c_version())
if c_compiler:
sig.append('c_compiler_str=' + c_compiler.version_str())
# IMPORTANT: The 'md5' prefix is used to isolate the compilation
# parameters from the rest of the key. If you want to add more key
# elements, they should be before this md5 hash if and only if they
# can lead to a different compiled file with the same source code.
if insert_config_md5:
sig.append('md5:' + theano.configparser.get_config_md5())
else:
sig.append('md5: <omitted>')
error_on_play = [False]
def in_sig(i, topological_pos, i_idx):
            # assert that every input to every node is one of:
# - an fgraph input
# - an output from a node in the FunctionGraph
# - a Constant
# It is important that a variable (i)
# yield a 'position' that reflects its role in code_gen()
if isinstance(i, graph.Constant): # orphans
if id(i) not in constant_ids:
isig = (i.signature(), topological_pos, i_idx)
# If the Theano constant provides a strong hash
# (no collision for transpose, 2, 1, 0, -1, -2,
# 2 element swapped...) we put this hash in the signature
# instead of the value. This makes the key file much
# smaller for big constant arrays. Before this, we saw key
# files up to 80M.
if hasattr(isig[0], "theano_hash"):
isig = (isig[0].theano_hash(), topological_pos, i_idx)
try:
hash(isig)
except Exception:
# generic constants don't have a hashable signature
error_on_play[0] = True
return None
constant_ids[id(i)] = isig
else:
isig = constant_ids[id(i)]
# print 'SIGNATURE', i.signature()
# return i.signature()
elif i in fgraph_inputs_dict: # inputs
isig = fgraph_inputs_dict[i]
else:
if i.owner is None:
assert all(all(out is not None for out in o.outputs)
for o in order)
assert all(input.owner is None for input in fgraph.inputs)
                    raise Exception('Variable is neither an fgraph input,'
                                    ' a node output, nor a Constant',
                                    (i, type(i), i.clients, fgraph))
if i in fgraph.outputs:
isig = (op_pos[i.owner], # outputs
i.owner.outputs.index(i),
fgraph.outputs.index(i))
else:
isig = (op_pos[i.owner], i.owner.outputs.index(i)) # temps
return (isig, i in no_recycling)
version = []
for node_pos, node in enumerate(order):
try:
# Pure Ops do not have a c_code_cache_version_apply ...
version.append(node.op.c_code_cache_version_apply(node))
except AttributeError:
pass
for i in node.inputs:
version.append(i.type.c_code_cache_version())
for o in node.outputs:
version.append(o.type.c_code_cache_version())
# add the signature for this node
sig.append((
node.op,
tuple((i.type, in_sig(i, node_pos, ipos))
for ipos, i in enumerate(node.inputs)),
(1, # Increment if cmodule change its handling of outputs
tuple(o in no_recycling for o in node.outputs))))
if error_on_play[0]:
# if one of the signatures is not hashable
# then bypass the cache mechanism and
# compile fresh every time
return None
op_pos[node] = node_pos
fgraph_computed_set.update(node.outputs)
        # Add unused inputs to the key
for ipos, var in [(i, var) for i, var in enumerate(fgraph.inputs)
if not len(var.clients)]:
sig.append((var.type, in_sig(var, -1, ipos)))
        # crystallize the signature and version
sig = tuple(sig)
version = tuple(version)
for v in version:
if not v:
# one of the ops or types here is unversioned,
# so this fgraph is entirely unversioned
return ((), sig)
return version, sig
def get_src_code(self):
mod = self.get_dynamic_module()
return mod.code()
def compile_cmodule(self, location=None):
"""
This compiles the source code for this linker and returns a
loaded module.
"""
if location is None:
location = cmodule.dlimport_workdir(config.compiledir)
mod = self.get_dynamic_module()
c_compiler = self.c_compiler()
libs = self.libraries()
preargs = self.compile_args()
compiler_name = c_compiler.__name__
if compiler_name == 'NVCC_compiler' and config.lib.amdlibm:
            # This lib does not work correctly with nvcc in device code,
            # nor with newer versions of g++ such as 4.5.1.
# example of errors: "/usr/lib/gcc/x86_64-redhat-linux/4.5.1/
# include/mmintrin.h(49): error: identifier
# "__builtin_ia32_emms" is undefined"
if '<amdlibm.h>' in mod.includes:
mod.includes.remove('<amdlibm.h>')
if '-DREPLACE_WITH_AMDLIBM' in preargs:
preargs.remove('-DREPLACE_WITH_AMDLIBM')
if 'amdlibm' in libs:
libs.remove('amdlibm')
# We want to compute the code without the lock
src_code = mod.code()
get_lock()
try:
_logger.debug("LOCATION %s", str(location))
module = c_compiler.compile_str(
module_name=mod.code_hash,
src_code=src_code,
location=location,
include_dirs=self.header_dirs(),
lib_dirs=self.lib_dirs(),
libs=libs,
preargs=preargs)
except Exception as e:
e.args += (str(self.fgraph),)
raise
finally:
release_lock()
return module
def get_dynamic_module(self):
"""Return a cmodule.DynamicModule instance full of the code
for our fgraph.
This method is cached on the first call so it can be called
multiple times without penalty.
"""
if not hasattr(self, '_mod'):
self.code_gen()
mod = cmodule.DynamicModule()
# The code of instantiate
# the 1 is for error_storage
code = self.instantiate_code(1 + len(self.args))
instantiate = cmodule.ExtFunction('instantiate', code,
method=cmodule.METH_VARARGS)
# ['error_storage'] + argnames,
# local_dict = d,
# global_dict = {})
# Static methods that can run and destroy the struct built by
# instantiate.
if PY3:
static = """
static int {struct_name}_executor({struct_name} *self) {{
return self->run();
}}
static void {struct_name}_destructor(PyObject *capsule) {{
{struct_name} *self = ({struct_name} *)PyCapsule_GetContext(capsule);
delete self;
}}
""".format(struct_name=self.struct_name)
else:
static = """
static int %(struct_name)s_executor(%(struct_name)s* self) {
return self->run();
}
static void %(struct_name)s_destructor(void* executor, void* self) {
delete ((%(struct_name)s*)self);
}
""" % dict(struct_name=self.struct_name)
# We add all the support code, compile args, headers and libs we need.
for support_code in self.support_code() + self.c_support_code_apply:
mod.add_support_code(support_code)
mod.add_support_code(self.struct_code)
mod.add_support_code(static)
mod.add_function(instantiate)
for header in self.headers():
mod.add_include(header)
for init_code_block in self.init_code() + self.c_init_code_apply:
mod.add_init_code(init_code_block)
self._mod = mod
return self._mod
def cthunk_factory(self, error_storage, in_storage, out_storage,
keep_lock=False):
"""WRITEME
error_storage -> list of length 3
in_storage -> list of lists of length 1, one per input
out_storage -> list of lists of length 1, one per output
Returns a thunk that points to an instance of a C struct that
can carry on the computation of this linker's fgraph. That thunk,
when executed, will fetch its inputs from in_storage, put its
outputs in out_storage and if an error occurs will put the
type, value and traceback of the exception in error_storage.
"""
try:
key = self.cmodule_key()
except KeyError:
key = None
if key is None:
# If we can't get a key, then forget the cache mechanism.
module = self.compile_cmodule()
else:
module = get_module_cache().module_from_key(
key=key, lnk=self, keep_lock=keep_lock)
vars = self.inputs + self.outputs + self.orphans
# List of indices that should be ignored when passing the arguments
# (basically, everything that the previous call to uniq eliminated)
dupidx = [i for i, x in enumerate(vars)
if vars.count(x) > 1 and vars.index(x) != i]
out_storage = [x for i, x in enumerate(out_storage)
if (i + len(in_storage)) not in dupidx]
in_storage = [x for i, x in enumerate(in_storage) if i not in dupidx]
orphd = [[orphan.data] for orphan in self.orphans]
ret = module.instantiate(error_storage,
*(in_storage + out_storage + orphd))
return ret
def instantiate_code(self, n_args):
code = StringIO()
struct_name = self.struct_name
print("static PyObject * instantiate(PyObject * self, PyObject *argtuple) {", file=code)
print(' assert(PyTuple_Check(argtuple));', file=code)
print(' if (%(n_args)i != PyTuple_Size(argtuple)){ ' % locals(), file=code)
print(' PyErr_Format(PyExc_TypeError, "Wrong number of arguments, expected %(n_args)i, got %%i", (int)PyTuple_Size(argtuple));' % locals(), file=code)
print(' return NULL;', file=code)
print(' }', file=code)
print(' %(struct_name)s* struct_ptr = new %(struct_name)s();' % locals(), file=code)
print(' if (struct_ptr->init(', ','.join('PyTuple_GET_ITEM(argtuple, %i)' % n for n in xrange(n_args)), ') != 0) {', file=code)
print(' delete struct_ptr;', file=code)
print(' return NULL;', file=code)
print(' }', file=code)
if PY3:
print("""\
PyObject* thunk = PyCapsule_New((void*)(&{struct_name}_executor), NULL, {struct_name}_destructor);
if (thunk != NULL && PyCapsule_SetContext(thunk, struct_ptr) != 0) {{
PyErr_Clear();
Py_DECREF(thunk);
thunk = NULL;
}}
""".format(**locals()), file=code)
else:
print(' PyObject* thunk = PyCObject_FromVoidPtrAndDesc((void*)(&%(struct_name)s_executor), struct_ptr, %(struct_name)s_destructor);' % locals(), file=code)
print(" return thunk; }", file=code)
return code.getvalue()
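# Illustrative sketch (not used anywhere in this module): how a CLinker is
# typically driven end to end. `fgraph` is assumed to be a theano
# FunctionGraph whose Ops all provide C code, and `input_values` a list with
# one value per fgraph input; the function name itself is just a placeholder.
def _example_clinker_usage(fgraph, input_values):
    lnk = CLinker().accept(fgraph)
    thunk, in_containers, out_containers = lnk.make_thunk()
    # Fill the input containers, run the compiled struct, read the outputs.
    for container, value in izip(in_containers, input_values):
        container.data = value
    thunk()
    return [container.data for container in out_containers]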
class _CThunk(object):
"""
A thunk with a C implementation
"""
def __init__(self, cthunk, init_tasks, tasks, error_storage):
"""
Parameters
----------
cthunk: the CObject pointer used by run_cthunk
init_tasks: WRITEME
tasks: WRITEME
error_storage: WRITEME
"""
global run_cthunk
if run_cthunk is None:
# Lazy import to avoid compilation when importing theano.
from theano.gof.cutils import run_cthunk # noqa
self.cthunk = cthunk
self.init_tasks = init_tasks
self.tasks = tasks
self.error_storage = error_storage
def find_task(self, failure_code):
"""
Maps a failure code to the task that is associated to it.
"""
failure_code -= 1
n = len(self.init_tasks)
# note that the failure code is distributed in two lists
if failure_code < 2 * n:
return [self.init_tasks, self.tasks][
failure_code % 2][failure_code // 2]
else:
return self.tasks[failure_code - n]
def __call__(self):
failure = run_cthunk(self.cthunk)
if failure:
task, taskname, id = self.find_task(failure)
try:
trace = task.trace
except AttributeError:
trace = ()
try:
exc_type, _exc_value, exc_trace = self.error_storage
if task in self.nodes:
self.position_of_error = self.nodes.index(task)
# this can be used to retrieve the location the Op was declared
exc_value = exc_type(_exc_value)
exc_value.__thunk_trace__ = trace
except Exception:
                print(('ERROR retrieving error_storage. '
'Was the error set in the c code?'),
end=' ', file=sys.stderr)
print(self.error_storage, file=sys.stderr)
raise
reraise(exc_type, exc_value, exc_trace)
class OpWiseCLinker(link.LocalLinker):
"""WRITEME
Uses CLinker on the individual Ops that comprise an fgraph and loops
    over them in Python. The resulting function is slower than a compiled
    version of the whole fgraph, but saves on compilation time because small changes
in the computation graph won't necessarily trigger any recompilation,
only local changes in the Variables or Ops that are used.
If fallback_on_perform is True, OpWiseCLinker will use an op's
perform method if no C version can be generated.
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it prior to computation (to avoid reusing it).
:note: This is in a sense the 'default' linker for Theano. The
overhead of using the OpWiseCLinker as compared with the CLinker
is only noticeable for graphs of very small tensors (such as 20
elements or less)
"""
__cache__ = {}
def __init__(self,
fallback_on_perform=True,
allow_gc=None,
nice_errors=True,
schedule=None):
if allow_gc is None:
allow_gc = config.allow_gc
self.fgraph = None
self.fallback_on_perform = fallback_on_perform
self.nice_errors = nice_errors
self.allow_gc = allow_gc
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(
fallback_on_perform=self.fallback_on_perform,
allow_gc=self.allow_gc,
nice_errors=self.nice_errors
).accept(fgraph, no_recycling)
# raise Exception("Cannot accept from a Linker that is
# already tied to another FunctionGraph.")
self.fgraph = fgraph
self.no_recycling = no_recycling
return self
def make_all(self, profiler=None, input_storage=None, output_storage=None):
        # The lock will be acquired when we compile the first piece of
        # C code. We keep the lock until all the function's compilation
        # is finished; this way the lock is not needed at all when all
        # the C code is already compiled!
orig_n_lock = getattr(get_lock, "n_lock", 0)
try:
fgraph = self.fgraph
order = self.schedule(fgraph)
no_recycling = self.no_recycling
input_storage, output_storage, storage_map = link.map_storage(
fgraph, order, input_storage, output_storage)
if self.allow_gc:
computed, last_user = link.gc_helper(order)
post_thunk_old_storage = []
else:
post_thunk_old_storage = None
compute_map = {}
for k in storage_map:
compute_map[k] = [k.owner is None]
thunks = []
for node in order:
                # Make sure we use the C version of the code whenever
                # possible.
                # Some ops don't have the _op_use_c_code property, for
                # example ifelse (or any op that comes with its own
                # make_thunk).
old_value = getattr(node.op, '_op_use_c_code', False)
try:
if theano.config.cxx:
node.op._op_use_c_code = True
thunks += [node.op.make_thunk(node,
storage_map,
compute_map,
no_recycling)]
thunks[-1].inputs = [storage_map[v] for v in node.inputs]
thunks[-1].outputs = [storage_map[v] for v in node.outputs]
finally:
node.op._op_use_c_code = old_value
for node in order:
if self.allow_gc:
post_thunk_old_storage.append(
[storage_map[input] for input in node.inputs
if ((input in computed) and
(input not in fgraph.outputs) and
node == last_user[input])])
if no_recycling is True:
no_recycling = list(storage_map.values())
no_recycling = utils.difference(no_recycling, input_storage)
else:
no_recycling = [storage_map[r]
for r in no_recycling if r not in fgraph.inputs]
f = link.streamline(fgraph, thunks, order,
post_thunk_old_storage,
no_recycling=no_recycling,
nice_errors=self.nice_errors)
f.allow_gc = self.allow_gc
finally:
# Release lock on compilation directory.
if getattr(get_lock, "n_lock", 0) > orig_n_lock:
release_lock()
assert get_lock.n_lock == orig_n_lock
return (f,
[link.Container(input, storage)
for input, storage in izip(fgraph.inputs, input_storage)],
[link.Container(output, storage, True)
for output, storage in izip(fgraph.outputs, output_storage)],
thunks,
order)
def _default_checker(x, y):
"""WRITEME
Default checker for DualLinker. This checks that the
variables contain the same data using ==.
"""
if x[0] != y[0]:
raise Exception("Output mismatch.",
{'performlinker': x[0], 'clinker': y[0]})
class DualLinker(link.Linker):
"""WRITEME
Runs the fgraph in parallel using PerformLinker and CLinker.
The thunk/function produced by DualLinker uses PerformLinker as the
"main" implementation: the inputs and outputs are fed to/taken from
the Ops' perform. However, DualLinker also instantiates a copy of
the fgraph on which it runs OpWiseCLinker. At each step, the variables
of perform and of the C implementation are verified using a checker
function.
"""
def __init__(self, checker=_default_checker, schedule=None):
"""
Initialize a DualLinker.
The checker argument must be a function that takes two lists
of length 1. The first one passed will contain the output
computed by PerformLinker and the second one the output
computed by OpWiseCLinker. The checker should compare the data
fields of the two variables to see if they match. By default,
DualLinker uses ==. A custom checker can be provided to
compare up to a certain error tolerance.
If a mismatch occurs, the checker should raise an exception to
halt the computation. If it does not, the computation will
carry on and errors will snowball. The checker can sidestep
the problem by fiddling with the data, but it should be
careful not to share data between the two outputs (or inplace
operations that use them will interfere).
no_recycling can contain a list of Variables that belong to the fgraph.
If a Variable is in no_recycling, CLinker will clear the output storage
associated to it during the computation (to avoid reusing it).
"""
self.fgraph = None
self.checker = checker
if schedule:
self.schedule = schedule
def accept(self, fgraph, no_recycling=None):
if no_recycling is None:
no_recycling = []
if self.fgraph is not None and self.fgraph is not fgraph:
return type(self)(self.checker).accept(fgraph, no_recycling)
self.fgraph = fgraph
self.no_recycling = no_recycling
return self
def make_thunk(self, **kwargs):
fgraph = self.fgraph
no_recycling = self.no_recycling
_f, i1, o1, thunks1, order1 = (
link.PerformLinker(schedule=self.schedule).accept(
fgraph, no_recycling=no_recycling).make_all(**kwargs))
kwargs.pop('input_storage', None)
_f, i2, o2, thunks2, order2 = (
OpWiseCLinker(schedule=self.schedule).accept(
fgraph, no_recycling=no_recycling).make_all(**kwargs))
def f():
for input1, input2 in izip(i1, i2):
# Set the inputs to be the same in both branches.
# The copy is necessary in order for inplace ops not to
# interfere.
input2.storage[0] = copy(input1.storage[0])
for thunk1, thunk2, node1, node2 in izip(thunks1, thunks2,
order1, order2):
for output, storage in izip(node1.outputs, thunk1.outputs):
if output in no_recycling:
storage[0] = None
for output, storage in izip(node2.outputs, thunk2.outputs):
if output in no_recycling:
storage[0] = None
try:
thunk1()
thunk2()
for output1, output2 in izip(thunk1.outputs,
thunk2.outputs):
self.checker(output1, output2)
except Exception:
link.raise_with_op(node1)
return f, i1, o1
class HideC(object):
def __hide(*args):
raise utils.MethodNotDefined()
c_code = __hide
c_code_cleanup = __hide
c_headers = __hide
c_header_dirs = __hide
c_libraries = __hide
c_lib_dirs = __hide
c_support_code = __hide
c_support_code_apply = __hide
c_compile_args = __hide
c_no_compile_args = __hide
c_init_code = __hide
c_init_code_apply = __hide
c_init_code_struct = __hide
c_support_code_struct = __hide
c_cleanup_code_struct = __hide
def c_code_cache_version(self):
return ()
def c_code_cache_version_apply(self, node):
return self.c_code_cache_version()

southampton/unimatrix | deskctl/lib/errors.py | gpl-3.0

#!/usr/bin/python
# -*- coding: utf-8 -*-
from deskctl import app
from flask import g, render_template, make_response, session, request
import traceback
################################################################################
## standard error (uses render_template and thus standard page layout)
def stderr(title,message,code=200,template="error.html"):
"""This function is called by other error functions to show the error to the
end user. It takes an error title and an error message.
"""
# Should we show a traceback?
if app.debug:
debug = traceback.format_exc()
else:
debug = ""
return render_template(template,title=title,message=message,debug=debug), code
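# Example (illustrative only; the titles and status code below are assumptions,
# not strings used elsewhere in this project): a Flask view could return
#
#     return stderr("Package not found", "No such package is installed", code=404)
#
# to render the standard error page with an HTTP 404 status.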
################################################################################
## fatal error (returns HTML from python code - which is more likely to work)
def fatalerr(title=u"fatal error ☹",message="Whilst processing your request an unexpected error occured which the application could not recover from",debug=None):
# Should we show a traceback?
if debug is None:
if app.debug:
debug = traceback.format_exc()
else:
debug = "Please ask your administrator to consult the error log for more information."
# Build the response. Not using a template here to prevent any Jinja
# issues from causing this to fail.
html = u"""
<!doctype html>
<html>
<head>
<title>Fatal Error</title>
<meta charset="utf-8" />
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<style type="text/css">
body {
background-color: #8B1820;
color: #FFFFFF;
margin: 0;
padding: 0;
font-family: "Open Sans", "Helvetica Neue", Helvetica, Arial, sans-serif;
}
h1 {
font-size: 4em;
font-weight: normal;
margin: 0px;
}
div {
width: 80%%;
margin: 5em auto;
padding: 50px;
border-radius: 0.5em;
}
@media (max-width: 900px) {
div {
width: auto;
margin: 0 auto;
border-radius: 0;
padding: 1em;
}
}
</style>
</head>
<body>
<div>
<h1>%s</h1>
<p>%s</p>
<pre>%s</pre>
</div>
</body>
</html>
""" % (title,message,debug)
return make_response(html, 500)
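# Example (illustrative only; `ex` is a placeholder name): an error handler
# might fall back to
#
#     return fatalerr(debug="template rendering failed: " + str(ex))
#
# when rendering the normal error template itself raises an exception, since
# fatalerr() builds its HTML without going through Jinja.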
################################################################################
## log a full error to the python logger
def logerr():
# Get the username
if 'username' in session:
username = session['username']
else:
username = 'Not logged in'
## Log the critical error (so that it goes to e-mail)
app.logger.error("""Request details:
HTTP Path: %s
HTTP Method: %s
Client IP Address: %s
User Agent: %s
User Platform: %s
User Browser: %s
User Browser Version: %s
Username: %s
Traceback:
%s
""" % (
request.path,
request.method,
request.remote_addr,
request.user_agent.string,
request.user_agent.platform,
request.user_agent.browser,
request.user_agent.version,
username,
traceback.format_exc(),
))
| gpl-3.0 | 6,697,952,601,230,375,000 | 23.699187 | 162 | 0.602041 | false | 3.356906 | false | false | false |
lynchnf/maneki-neko-web | socialmedia/models.py | 1 | 1095 | from cms.models.pluginmodel import CMSPlugin
from django.utils.translation import ugettext_lazy as _
from django.db import models
ICON_CHOICES = (
("fa-delicious", _("Delicious")),
("fa-digg", _("Digg")),
("fa-facebook", _("Facebook")),
("fa-flickr", _("Flickr")),
("fa-google-plus", _("Google+")),
("fa-instagram", _("Instagram")),
("fa-linkedin", _("LinkedIn")),
("fa-map-marker", _("Map")),
("fa-pinterest", _("Pinterest")),
("fa-rss", _("RSS feed")),
("fa-reddit", _("reddit")),
("fa-spotify", _("Spotify")),
("fa-stumbleupon", _("StumbleUpon")),
("fa-tumblr", _("Tumblr")),
("fa-twitter", _("Twitter")),
("fa-youtube-play", _("YouTube")))
SIZE_CHOICES = [(i,i) for i in range(6)]
class SocialLink(CMSPlugin):
icon = models.CharField("Social Network Icon", max_length=20, choices=ICON_CHOICES)
size = models.IntegerField("Icon Size", default=0, choices=SIZE_CHOICES)
url = models.URLField("URL")
def __unicode__(self):
return self.url | mit | -454,674,423,884,372,100 | 33.25 | 87 | 0.552511 | false | 3.46519 | false | false | false |
HUGG/NGWM2016-modelling-course | Lessons/06-Rheology-of-the-lithosphere/scripts/solutions/strength-envelope-uniform-crust.py | 1 | 7747 | '''
strength-envelope-uniform-crust.py
This script can be used for plotting strength envelopes for a lithosphere with
a uniform crust. The script includes a function sstemp() that can be used for
calculating the lithospheric temperature as a function of the input material
properties
dwhipp 01.16 (modified from code written by L. Kaislaniemi)
'''
# --- USER INPUT PARAMETERS ---
# Model geometry
z_surf = 0.0 # Elevation of upper surface [km]
z_bott = 100.0 # Elevation of bottom boundary [km]
nz = 100 # Number of grid points
# Boundary conditions
T_surf = 0.0 # Temperature of the upper surface [deg C]
q_surf = 65.0 # Surface heat flow [mW/m^2]
# Thermal conductivity (constant across model thickness)
k = 2.75 # Thermal conductivity [W/m K]
# Deformation rate
edot = 1.0e-15 # Reference strain rate [1/s]
# Constants
g = 9.81 # Gravitational acceleration [m/s^2]
R = 8.314 # Gas constant
# MATERIAL PROPERTY DEFINITIONS
# Crust (Wet quartzite - Gleason and Tullis, 1995)
mat1 = 'Wet quartzite'
L1 = 35.0 # Thickness of layer one [km]
A1 = 1.1 # Average heat production rate for crust [uW/m^3]
rho1 = 2800.0 # Rock density [kg/m^3]
Avisc1 = 1.1e-4 # Viscosity constant [MPa^-n s^-1]
Q1 = 223.0 # Activation energy [kJ/mol]
n1 = 4.0 # Power-law exponent
mu1 = 0.85 # Friction coefficient
C1 = 0.0 # Cohesion [MPa]
# Mantle (Wet olivine - Hirth and Kohlstedt, 1996)
mat2 = 'Wet olivine'
A2 = 0.02 # Heat production rate for mantle [uW/m^3]
rho2 = 3300.0 # Rock density [kg/m^3]
Avisc2 = 4.876e6 # Viscosity constant [MPa^-n s^-1]
Q2 = 515.0 # Activation energy [kJ/mol]
n2 = 3.5 # Power-law exponent
mu2 = 0.6 # Friction coefficient
C2 = 60.0 # Cohesion [MPa]
# END MATERIAL PROPERTY DEFINITIONS
# --- END USER INPUTS ---
# Import libraries
import numpy as np
import matplotlib.pyplot as plt
# Define function to calculate temperatures (DO NOT MODIFY)
def sstemp(A,k,dz,nz,T_surf,q_surf):
# Generate an empty array for temperature values
T = np.zeros(nz)
# Set boundary conditions
# the upper surface temperature and the temperature at one grid point below
T[0] = T_surf
## Grid point one needs special handling as T[-1] is not available
# Calculate "ghost point" outside the model domain, where grid point -1
# would be, assuming surface heat flow q_surf
Tghost = T[0] - q_surf * dz / k # = "T[-1]"
# Use the same finite difference formula to calculate T as for
# the inner points, but replace "T[-1]" by ghost point value
T[1] = -A[1] * dz**2 / k - Tghost + 2*T[0]
# Calculate temperatures across specified thickness
for i in range(2, nz): # NB! Grid points 0 and 1 omitted as they cannot be calculated
T[i] = -A[i] * dz**2 / k - T[i-2] + 2*T[i-1]
return T
# Define conversion factors
km2m = 1.0e3 # [km] to [m]
mW2W = 1.0e-3 # [mW] to [W]
uW2W = 1.0e-6 # [uW] to [W]
MPa2Pa = 1.0e6 # [MPa] to [Pa]
kJ2J = 1.0e3 # [kJ] to [J]
# Convert material property units to SI
z_surf = z_surf * km2m
z_bott = z_bott * km2m
q_surf = q_surf * mW2W
A1 = A1 * uW2W
A2 = A2 * uW2W
L1 = L1 * km2m
Avisc1 = Avisc1 / MPa2Pa**n1
Avisc2 = Avisc2 / MPa2Pa**n2
Q1 = Q1 * kJ2J
Q2 = Q2 * kJ2J
C1 = C1 * MPa2Pa
C2 = C2 * MPa2Pa
# Generate the grid
# Regular grid is used, so that in FD calculations
# only dz is needed. Array z is used only for plotting.
dz = (z_bott - z_surf) / (nz - 1)
z = np.linspace(z_surf, z_bott, nz)
# Generate the material properties arrays
A = np.zeros(nz)
rho = np.zeros(nz)
Avisc = np.zeros(nz)
Q = np.zeros(nz)
n = np.zeros(nz)
mu = np.zeros(nz)
C = np.zeros(nz)
for i in range(nz):
# Fill material property arrays for depths in the crust
if z[i] <= L1:
A[i] = A1
rho[i] = rho1
Avisc[i] = Avisc1
Q[i] = Q1
n[i] = n1
mu[i] = mu1
C[i] = C1
# Fill material property arrays for depths in the mantle
else:
A[i] = A2
rho[i] = rho2
Avisc[i] = Avisc2
Q[i] = Q2
n[i] = n2
mu[i] = mu2
C[i] = C2
# Call function to get temperatures
T = sstemp(A,k,dz,nz,T_surf,q_surf)
T = T + 273.15 # Convert to Kelvins
# Initialize arrays
P = np.zeros(nz)
frict = np.zeros(nz)
visc = np.zeros(nz)
strength = np.zeros(nz)
# Calculate lithostatic pressure
for i in range(1, nz):
P[i] = P[i-1] + rho[i] * g * dz
# Loop over all points and calculate frictional and viscous strengths
for i in range(nz):
# Calculate frictional shear strength using Coulomb criterion
frict[i] = mu[i] * P[i] + C[i]
    # Calculate viscous strength using the power-law (dislocation creep) flow law
visc[i] = (edot/Avisc[i])**((1./n[i]))*np.exp(Q[i]/(n[i]*R*T[i]))
# Use logical statements to make sure the stored strength value is the
# smaller of the two calculated above for each point
if frict[i] <= visc[i]:
strength[i] = frict[i]
else:
strength[i] = visc[i]
# Rescale values for plotting
T = T - 273.15
z = z / km2m
strength = strength / MPa2Pa
z_bott = z_bott / km2m
# Create figure window for plot
plt.figure()
# PLOT #1 - Left panel, temperature versus depth
plt.subplot(121)
# Plot temperature on left subplot
plt.plot(T, z, "ro-")
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Temperature [$^{\circ}$C]")
plt.ylabel("Depth [km]")
# PLOT #2 - Right panel, strength versus depth
plt.subplot(122)
# Plot strength versus depth
plt.plot(strength, z, "ko-")  # the depth axis is inverted below so it points downward
# Invert y axis
plt.gca().invert_yaxis()
# Label axes
plt.xlabel("Strength [MPa]")
# Add text labels for materials
plt.text(0.2*max(strength), 0.8*z_bott, "Layer 1: "+mat1)
plt.text(0.2*max(strength), 0.85*z_bott, "Layer 2: "+mat2)
plt.show()
| mit | -5,469,655,399,814,648,000 | 36.97549 | 141 | 0.47954 | false | 3.452317 | false | false | false |
sony/nnabla | python/src/nnabla/backward_function/log_softmax.py | 1 | 1273 | # Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nnabla.functions as F
from .utils import no_grad, positive_axis, get_output
def log_softmax_backward(inputs, axis=None):
"""
Args:
      inputs (list of nn.Variable): Incoming grads/inputs to/of the forward function.
kwargs (dict of arguments): Dictionary of the corresponding function arguments.
Return:
list of Variable: Return the gradients wrt inputs of the corresponding function.
"""
dy = inputs[0]
x0 = inputs[1]
y0 = get_output(x0, "LogSoftmax")
D = len(x0.shape)
axis = positive_axis(axis, D)
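    # Gradient of log-softmax: dx = dy - exp(y) * sum(dy) along the softmax axis.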
dx0 = dy - F.exp(y0) * F.sum(dy, axis=axis, keepdims=True)
return dx0
| apache-2.0 | 1,820,603,704,380,515,000 | 34.361111 | 86 | 0.716418 | false | 3.755162 | false | false | false |
thonkify/thonkify | src/lib/future/backports/email/iterators.py | 1 | 2346 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Barry Warsaw
# Contact: [email protected]
"""Various types of useful iterators and generators."""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
__all__ = [
'body_line_iterator',
'typed_subpart_iterator',
'walk',
# Do not include _structure() since it's part of the debugging API.
]
import sys
from io import StringIO
# This function will become a method of the Message class
def walk(self):
"""Walk over the message tree, yielding each subpart.
The walk is performed in depth-first order. This method is a
generator.
"""
yield self
if self.is_multipart():
for subpart in self.get_payload():
for subsubpart in subpart.walk():
yield subsubpart
# These two functions are imported into the Iterators.py interface module.
def body_line_iterator(msg, decode=False):
"""Iterate over the parts, returning string payloads line-by-line.
Optional decode (default False) is passed through to .get_payload().
"""
for subpart in msg.walk():
payload = subpart.get_payload(decode=decode)
if isinstance(payload, str):
for line in StringIO(payload):
yield line
def typed_subpart_iterator(msg, maintype='text', subtype=None):
"""Iterate over the subparts with a given MIME type.
Use `maintype' as the main MIME type to match against; this defaults to
"text". Optional `subtype' is the MIME subtype to match against; if
omitted, only the main type is matched.
"""
for subpart in msg.walk():
if subpart.get_content_maintype() == maintype:
if subtype is None or subpart.get_content_subtype() == subtype:
yield subpart
def _structure(msg, fp=None, level=0, include_default=False):
"""A handy debugging aid"""
if fp is None:
fp = sys.stdout
tab = ' ' * (level * 4)
print(tab + msg.get_content_type(), end='', file=fp)
if include_default:
print(' [%s]' % msg.get_default_type(), file=fp)
else:
print(file=fp)
if msg.is_multipart():
for subpart in msg.get_payload():
_structure(subpart, fp, level + 1, include_default)
| mit | -7,011,586,712,797,445,000 | 30.702703 | 75 | 0.650469 | false | 3.858553 | false | false | false |
eladnoor/equilibrator | gibbs/forms.py | 1 | 5821 | from django import forms
from util import constants
import haystack.forms
class ListFormField(forms.MultipleChoiceField):
"""
A form field for a list of values that are unchecked.
The Django MultipleChoiceField does *almost* what we want, except
it validates that each choice is in a supplied list of choices,
even when that list is empty. We simply override the validation.
"""
def valid_value(self, value):
return True
class EnzymeForm(forms.Form):
ec = forms.CharField(max_length=50)
# Convenience accessors for clean data with defaults.
cleaned_ec = property(lambda self: self.cleaned_data['ec'])
class BaseSearchForm(haystack.forms.SearchForm):
def _GetWithDefault(self, key, default):
if (key not in self.cleaned_data or self.cleaned_data[key] is None):
return default
return self.cleaned_data[key]
class SuggestForm(BaseSearchForm):
query = forms.CharField(max_length=2048, required=False)
cleaned_query = property(lambda self: self._GetWithDefault('query', ''))
class SearchForm(BaseSearchForm):
query = forms.CharField(max_length=2048, required=False)
ph = forms.FloatField(required=False)
pmg = forms.FloatField(required=False)
ionic_strength = forms.FloatField(required=False)
electronReductionPotential = forms.FloatField(required=False)
max_priority = forms.IntegerField(required=False)
mode = forms.ChoiceField(required=False,
choices=[('BA', 'basic'), ('AD', 'advanced')])
# Convenience accessors for clean data with defaults.
cleaned_query = property(lambda self: self._GetWithDefault('query', ''))
cleaned_ph = property(lambda self: self._GetWithDefault('ph', None))
cleaned_pmg = property(lambda self: self._GetWithDefault('pmg', None))
cleaned_ionic_strength = property(
lambda self: self._GetWithDefault('ionic_strength', None))
cleaned_e_reduction_potential = property(
lambda self: self._GetWithDefault('electronReductionPotential', None))
cleaned_max_priority = property(
lambda self: self._GetWithDefault('max_priority', 0))
cleaned_mode = property(
lambda self: self._GetWithDefault('mode', ''))
class BaseReactionForm(SearchForm):
def GetReactantConcentrations(self):
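        # Multiply each concentration by its unit prefactor; invalid or non-positive values fall back to 1e-9.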
prefactors = map(float,
self.cleaned_data['reactantsConcentrationPrefactor'])
for f, c in zip(prefactors,
self.cleaned_data['reactantsConcentration']):
try:
conc = f * float(c)
if conc <= 0:
yield 1e-9
else:
yield conc
except ValueError:
yield 1e-9
reactantsPhase = forms.MultipleChoiceField(required=False,
choices=constants.PHASE_CHOICES)
reactantsConcentration = ListFormField(required=False)
reactantsConcentrationPrefactor = ListFormField(required=False)
# Convenience accessors for clean data with defaults.
cleaned_reactantsPhase = property(
lambda self: self.cleaned_data['reactantsPhase'])
cleaned_reactantsConcentration = property(GetReactantConcentrations)
class ReactionForm(BaseReactionForm):
reactionId = forms.CharField(required=False)
reactantsId = ListFormField(required=False)
reactantsCoeff = ListFormField(required=False)
reactantsName = ListFormField(required=False)
submit = forms.ChoiceField(required=False,
choices=[('Update', 'update'),
('Save', 'save'),
('Reverse', 'reverse'),
('Reset', 'reset')])
# Convenience accessors for clean data with defaults.
cleaned_reactionId = property(
lambda self: self.cleaned_data['reactionId'])
cleaned_reactantsId = property(
lambda self: self.cleaned_data['reactantsId'])
cleaned_reactantsCoeff = property(
lambda self: [float(c) for c in self.cleaned_data['reactantsCoeff']])
cleaned_reactantsName = property(
lambda self: self.cleaned_data['reactantsName'])
cleaned_submit = property(
lambda self: self._GetWithDefault('submit', 'Update'))
class ReactionGraphForm(ReactionForm):
vary_ph = forms.BooleanField(required=False)
vary_is = forms.BooleanField(required=False)
vary_pmg = forms.BooleanField(required=False)
# Convenience accessors for clean data with defaults.
cleaned_vary_ph = property(
lambda self: self._GetWithDefault('vary_ph', False))
cleaned_vary_pmg = property(
lambda self: self._GetWithDefault('vary_pmg', False))
cleaned_vary_is = property(
lambda self: self._GetWithDefault('vary_is', False))
class CompoundForm(BaseReactionForm):
compoundId = forms.CharField(max_length=50)
submit = forms.ChoiceField(required=False,
choices=[('Update', 'update'),
('Reset', 'reset')])
# Convenience accessors for clean data with defaults.
cleaned_compoundId = property(lambda self: self.cleaned_data['compoundId'])
# we need to create the following properties in order for this form
# to impersonate a reaction_form (something we need to do for creating
# a Reaction object using .FromForm(form))
cleaned_reactionId = property(lambda self: None)
cleaned_reactantsId = property(lambda self: [self.cleaned_compoundId])
cleaned_reactantsCoeff = property(lambda self: [1])
cleaned_reactantsName = property(lambda self: [None])
cleaned_submit = property(
lambda self: self._GetWithDefault('submit', 'Update'))
| mit | 2,429,056,739,897,362,400 | 37.549669 | 79 | 0.655214 | false | 4.215062 | false | false | false |
USGSDenverPychron/pychron | pychron/spectrometer/local_mftable_history_view.py | 1 | 3951 | # ===============================================================================
# Copyright 2014 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from traits.api import HasTraits, Float, List
from traitsui.api import View, UItem, VGroup, HSplit, TabularEditor
from traitsui.editors import TextEditor
from traitsui.group import HGroup
from traitsui.tabular_adapter import TabularAdapter
# ============= standard library imports ========================
# ============= local library imports ==========================
from pychron.git_archive.history import GitArchiveHistory, GitArchiveHistoryView, DiffView
def left_group():
return VGroup(HGroup(UItem('left_message', style='readonly'),
UItem('left_date', style='readonly')),
UItem('left',
style='custom',
editor=TextEditor(read_only=True)))
def right_group():
return VGroup(HGroup(UItem('right_message', style='readonly'),
UItem('right_date', style='readonly')),
UItem('right',
style='custom',
editor=TextEditor(read_only=True)))
class ItemAdapter(TabularAdapter):
pass
class FieldItem(HasTraits):
pass
class MFTableDiffView(DiffView):
diff_items = List
def __init__(self, *args, **kw):
super(MFTableDiffView, self).__init__(*args, **kw)
self._load_diff()
def _load_diff(self):
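        # Parse both revisions and, when their column keys match, tabulate the per-isotope differences.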
lkeys, lvalues = self._parse_txt(self.left)
rkeys, rvalues = self._parse_txt(self.right)
self.item_adapter = ItemAdapter()
if lkeys == rkeys:
cols = [(v, v) for v in lkeys]
self.item_adapter.columns = cols
for lv in lvalues:
iso = lv[0]
rv = next((ri for ri in rvalues if ri[0] == iso))
d = FieldItem(iso=iso)
for i, k in enumerate(lkeys[1:]):
dv = float(lv[i + 1]) - float(rv[i + 1])
d.add_trait(k, Float(dv))
self.diff_items.append(d)
def _parse_txt(self, txt):
lines = txt.split('\n')
keys = lines[0].split(',')
data = [line.split(',') for line in lines[1:] if line]
return keys, data
def traits_view(self):
v = View(VGroup(HSplit(left_group(), right_group()),
UItem('diff_items', editor=TabularEditor(editable=False,
adapter=self.item_adapter))),
title='Diff',
width=900,
buttons=['OK'],
kind='livemodal',
resizable=True)
return v
class LocalMFTableHistory(GitArchiveHistory):
diff_klass = MFTableDiffView
class LocalMFTableHistoryView(GitArchiveHistoryView):
pass
# if __name__ == '__main__':
# r = '/Users/ross/Sandbox/gitarchive'
# gh = LocalMFTableHistory(r, '/Users/ross/Sandbox/ga_test.txt')
#
# gh.load_history('ga_test.txt')
#
# gh.selected = [gh.items[5], gh.items[6]]
# gh._diff_button_fired()
# # ghv = LocalMFTableHistoryView(model=gh)
# # ghv.configure_traits(kind='livemodal')
# ============= EOF =============================================
| apache-2.0 | 8,448,448,464,310,305,000 | 33.060345 | 94 | 0.539863 | false | 4.081612 | false | false | false |
dariost/utility | pi.py | 1 | 1131 | #!/usr/bin/env python3
#####################################################
# #
# License: Apache License 2.0 #
# Author: Dario Ostuni <[email protected]> #
# #
#####################################################
import sys
def fatt(n):
tmp = 1
for i in range(1, n + 1):
tmp *= i
return tmp
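# term(n) returns the numerator and denominator of the n-th term of a
# Ramanujan-type series whose sum is 3528/pi.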
def term(n):
num = ((-1)**n)*fatt(4*n)*(21460*n+1123)
den = (fatt(n)**4)*(14112**(2*n))
return num, den
def mcd(a, b):
if a < b:
a, b = b, a
while b != 0:
a, b = b, a % b
return a
if len(sys.argv) == 2:
cfr = int(sys.argv[1])
else:
cfr = int(input("Number of digits: "))
prec = cfr // 5 + 10
num = 0
den = 1
for i in range(prec):
tmp_n, tmp_d = term(i)
num = num * tmp_d + den * tmp_n
den = den * tmp_d
gcd = mcd(num, den)
num //= gcd
den //= gcd
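# The series sums to 3528/pi, so invert the fraction and scale: pi = 3528 * den / num.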
num, den = den * 3528, num
num -= 3 * den
print("3.", end='')
for i in range(cfr):
num *= 10
print(num // den, end='')
num %= den
print(flush=True)
| apache-2.0 | 1,180,066,181,916,586,800 | 20.75 | 53 | 0.402299 | false | 3.040323 | false | false | false |
zenn1989/scoria-interlude | L2Jscoria-Game/data/scripts/custom/8009_HotSpringsBuffs/__init__.py | 1 | 2538 | import sys
from com.l2scoria.gameserver.model.quest import State
from com.l2scoria.gameserver.model.quest import QuestState
from com.l2scoria.gameserver.model.quest.jython import QuestJython as JQuest
from com.l2scoria.gameserver.datatables import SkillTable
from com.l2scoria.util.random import Rnd
qn = "8009_HotSpringsBuffs"
HSMOBS = [21316, 21317, 21321, 21322, 21314, 21319]
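# On each attack, hot-springs monsters have a chance to inflict or worsen disease debuffs (skills 4551-4554) on the attacker.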
class Quest (JQuest) :
def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr)
def onAttack (self,npc,player,damage,isPet):
npcId = npc.getNpcId()
if npcId in HSMOBS:
if (Rnd.get(2) == 1):
if player.getFirstEffect(int(4554)):
malaria = player.getFirstEffect(int(4554)).getLevel()
if (Rnd.get(100) < 15):
if malaria < 10:
newmalaria = int(malaria + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4554,newmalaria))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4554,1))
elif npcId == 21317 or npcId == 21322 :
if player.getFirstEffect(int(4553)):
flu = player.getFirstEffect(int(4553)).getLevel()
if (Rnd.get(100) < 15):
if flu < 10:
newflu = int(flu + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4553,newflu))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4553,1))
elif npcId == 21319 or npcId == 21316 :
if player.getFirstEffect(int(4552)):
holera = player.getFirstEffect(int(4552)).getLevel()
if (Rnd.get(100) < 30):
if holera < 10:
newholera = int(holera + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4552,newholera))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4552,1))
else:
if player.getFirstEffect(int(4551)):
rheumatism = player.getFirstEffect(int(4551)).getLevel()
if (Rnd.get(100) < 30):
if rheumatism < 10:
newrheumatism = int(rheumatism + 1)
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4551,newrheumatism))
else:
npc.setTarget(player)
npc.doCast(SkillTable.getInstance().getInfo(4551,1))
return
QUEST = Quest(8009,qn,"custom")
for i in HSMOBS:
QUEST.addAttackId(i) | gpl-3.0 | -930,510,215,556,508,700 | 36.338235 | 78 | 0.611505 | false | 2.971897 | false | false | false |
simleo/pydoop-features | pyfeatures/app/summarize.py | 1 | 2371 | # BEGIN_COPYRIGHT
#
# Copyright (C) 2017 Open Microscopy Environment:
# - University of Dundee
# - CRS4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
"""\
Summarize the contents of an output (featureset) Avro container.
"""
import os
import warnings
try:
from pyavroc import AvroFileReader
except ImportError:
from pyfeatures.pyavroc_emu import AvroFileReader
warnings.warn("pyavroc not found, using standard avro lib\n")
def add_parser(subparsers):
parser = subparsers.add_parser("summarize", description=__doc__)
parser.add_argument("in_fn", metavar="FILE", help="Avro container file")
parser.add_argument("-o", "--out-fn", metavar="FILE", help="output file")
parser.set_defaults(func=run)
return parser
def run(logger, args, extra_argv=None):
if not args.out_fn:
tag = os.path.splitext(os.path.basename(args.in_fn))[0]
args.out_fn = "%s.summary" % tag
str_keys = ["name", "img_path"]
int_keys = ["series", "z", "c", "t", "w", "h", "x", "y"]
d = {"n_features": set()}
with open(args.in_fn) as f:
reader = AvroFileReader(f)
for r in reader:
d["n_features"].add(
sum(len(v) for k, v in r.iteritems() if type(v) is list)
)
for k in str_keys:
d.setdefault(k, set()).add(r[k])
for k in int_keys:
d.setdefault(k, set()).add(int(r[k]))
logger.info("writing to %s", args.out_fn)
with open(args.out_fn, "w") as fo:
for k in str_keys:
fo.write("%s: %s\n" % (k, ", ".join(sorted(d[k]))))
for k in int_keys:
v = sorted(d[k])
if len(v) > 2 and v == range(v[0], v[-1] + 1):
fo.write("%s: %d-%d\n" % (k, v[0], v[-1]))
else:
fo.write("%s: %s\n" % (k, ", ".join(map(str, v))))
| apache-2.0 | 5,210,433,854,678,834,000 | 33.362319 | 77 | 0.601012 | false | 3.261348 | false | false | false |
WBradbeer/port-routing | lp_helpers.py | 1 | 2253 | import itertools
import numpy as np
def flatten_2(data):
vector = []
for i in data:
for j in i:
vector.append(j)
return vector
def flatten_3(data):
return flatten_2(flatten_2(data))
def reshape_2D(vector, rows, cols):
data = []
for i in range(0, rows):
data.append([])
for j in range(0, cols):
data[i].append(vector[j + i*cols])
return data
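# Reshape a flat vector back into an F x F x D nested list (inverse of flatten_3).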
def reshape_3D(vector, F, D):
data = []
for i in range(0, F):
data.append([])
for j in range(0, F):
data[i].append([])
for k in range(0, D):
data[i][j].append(vector[k + j*D + i*F*D])
return data
def combine_matrices(d1, d2):
combined = []
for i in range(0, len(d1)):
combined.append([])
for k in range(0, len(d1[i])):
combined[i].append([])
for j in range(0, len(d2[k])):
combined[i][k].append(d1[i][k] + d2[k][j])
return combined
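# Build constraint rows that, for each (i, j) pair, sum x[i][k][j] over the middle index k
# (matching the variable ordering produced by generate_x).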
def sum_ij_over_k(F, D):
block = np.tile(np.identity(D), F)
zeros = np.zeros_like(block)
id_f = np.identity(F)
return flatten_2([np.hstack((block if col == 1 else zeros for col in row)
) for row in id_f])
def row_sums(row_num, col_num):
ident = np.identity(row_num)
return [flatten_2([[i] * col_num for i in ident_row]) for ident_row in ident]
def col_sums(row_num, col_num):
ident = np.identity(col_num)
return np.hstack((ident for i in range(0, row_num)))
def scanner_constraints(scanning, F, D):
scanning = [abs(x - 1) for x in scanning]
return flatten_2([[x]*D for x in scanning]*F)
def generate_x(F, D):
x = []
for i in range(0, F):
for k in range(0, F):
for j in range(0, D):
x.append("X({},{},{})".format(i+1, k+1, j+1))
return x
def show_eq(x, coefs, b):
eq = ""
for i in range(0, len(x)):
eq += str(coefs[i]) + "*" + x[i] + " "
return eq + "= " + str(b)
def gen_scanning_combs(F):
for comb in itertools.product([0,1], repeat=F):
yield comb
def gen_scanning_bound(combs, scanner_capacity, base=[]):
for comb in combs:
yield np.append(base, np.array(comb)*scanner_capacity)
| mit | -9,073,479,037,994,305,000 | 23.758242 | 81 | 0.537949 | false | 3.032301 | false | false | false |
lixingcong/shadowsocks_analysis | shadowsocks/server.py | 1 | 5564 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Server side.
Note: read local.py first. Apart from the multi-user (port -> password)
handling, the logic here mirrors local.py, so see local.py for comments
that are missing here.
'''
from __future__ import absolute_import, division, print_function, \
with_statement
import sys
import os
import logging
import signal
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '../'))
from shadowsocks import utils, daemon, encrypt, eventloop, tcprelay, udprelay, \
asyncdns
def main():
utils.check_python()
# is_local=false
config = utils.get_config(False)
daemon.daemon_exec(config)
utils.print_shadowsocks()
    # Support multiple clients: one password per port
if config['port_password']:
if config['password']:
logging.warn('warning: port_password should not be used with '
'server_port and password. server_port and password '
'will be ignored')
else:
config['port_password'] = {}
server_port = config['server_port']
        # Build the 'port -> password' mapping from server_port (which may be a list of ports).
if type(server_port) == list:
for a_server_port in server_port:
config['port_password'][a_server_port] = config['password']
else:
config['port_password'][str(server_port)] = config['password']
# Create an instance of the cipher class
encrypt.try_cipher(config['password'], config['method'])
tcp_servers = []
udp_servers = []
dns_resolver = asyncdns.DNSResolver()
    # A single server can listen on multiple ports;
    # create a corresponding TCP and UDP relay handler for each port.
for port, password in config['port_password'].items():
a_config = config.copy()
a_config['server_port'] = int(port)
a_config['password'] = password
logging.info("starting server at %s:%d" %
(a_config['server'], int(port)))
        # Append each relay to the TCP and UDP server lists
tcp_servers.append(tcprelay.TCPRelay(a_config, dns_resolver, False))
udp_servers.append(udprelay.UDPRelay(a_config, dns_resolver, False))
def run_server():
        # Quit-signal handler: close every socket and release resources.
def child_handler(signum, _):
logging.warn('received SIGQUIT, doing graceful shutting down..')
            # Close all sockets in a single statement using map()
# map(function, sequence[, sequence, ...]) -> list
# Return a list of the results of applying the function to the items of the argument sequence(s).
list(map(lambda s: s.close(next_tick = True),
tcp_servers + udp_servers))
        # On SIGQUIT/SIGTERM, call child_handler for a graceful shutdown.
signal.signal(getattr(signal, 'SIGQUIT', signal.SIGTERM),
child_handler)
        # On SIGINT, call int_handler to exit immediately.
def int_handler(signum, _):
sys.exit(1)
signal.signal(signal.SIGINT, int_handler)
try:
loop = eventloop.EventLoop()
dns_resolver.add_to_loop(loop)
            # Add every listening server to the event loop in a single statement
list(map(lambda s: s.add_to_loop(loop), tcp_servers + udp_servers))
loop.run()
except (KeyboardInterrupt, IOError, OSError) as e:
logging.error(e)
if config['verbose']:
import traceback
traceback.print_exc()
os._exit(1)
# Shadowsocks supports spawning child processes like nginx.
# You can use --workers to specify how many workers to use.
# This argument is only supported on Unix and ssserver.
# Currently UDP relay does not work well on multiple workers.
    # The worker count is taken from the config; multi-process mode only takes effect on Unix/Linux.
    # Known issue: UDP relay may not work correctly when workers > 1.
if int(config['workers']) > 1:
if os.name == 'posix':
children = []
is_child = False
for i in range(0, int(config['workers'])):
r = os.fork()
if r == 0:
logging.info('worker started')
is_child = True
run_server()
break
else:
children.append(r)
if not is_child:
def handler(signum, _):
for pid in children:
try:
os.kill(pid, signum)
os.waitpid(pid, 0)
except OSError: # child may already exited
pass
sys.exit()
signal.signal(signal.SIGTERM, handler)
signal.signal(signal.SIGQUIT, handler)
signal.signal(signal.SIGINT, handler)
# master
for a_tcp_server in tcp_servers:
a_tcp_server.close()
for a_udp_server in udp_servers:
a_udp_server.close()
dns_resolver.close()
for child in children:
os.waitpid(child, 0)
else:
logging.warn('worker is only available on Unix/Linux')
run_server()
else:
run_server()
if __name__ == '__main__':
main()
| mit | 9,211,529,882,349,999,000 | 33.353741 | 111 | 0.543762 | false | 3.328939 | true | false | false |
lexdene/hbml | tests/template_test.py | 1 | 1579 | import os
import unittest
import hbml
def _file_content(path):
with open(path, 'r') as f:
content = f.read()
return content
DIRPATH = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
'templates'
)
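# Each .hbml template under templates/ is compiled and compared against its expected .html output.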
class TemplateTestCase(unittest.TestCase):
def _test_file(self, filename):
self.assertEqual(
_file_content(
os.path.join(
DIRPATH,
filename + '.html'
)
),
hbml.compile(
_file_content(
os.path.join(
DIRPATH,
filename + '.hbml'
)
)
) + "\n"
)
def _test_uncompress_file(self, filename):
self.assertEqual(
_file_content(
os.path.join(
DIRPATH,
filename + '.uncompress.html'
)
),
hbml.compile(
_file_content(
os.path.join(
DIRPATH,
filename + '.hbml'
)
),
compress_output=False
)
)
def testTemplates(self):
for filename in os.listdir(DIRPATH):
filename, extname = os.path.splitext(filename)
if extname == '.hbml':
with self.subTest(filename=filename):
self._test_file(filename)
self._test_uncompress_file(filename)
| gpl-3.0 | 8,569,644,330,344,459,000 | 23.671875 | 58 | 0.419886 | false | 4.671598 | true | false | false |
digitalvectorz/syn | Syn/Unlink.py | 1 | 1819 | """
Simple unlink routines
@license: GPL-3+
@author: Paul Tagliamonte <[email protected]>
@date: August 8th, 2011, 00:10 -0000
Unlink a package from the filesystem
"""
import os.path
import Syn.Exceptions
import Syn.Policy.Db as D
import Syn.Policy.BinaryPackage as B
import Syn.Policy.Chroot as C
import Syn.PackageRegistry
def unlink(packageid):
"""
	Unlink a package from the filesystem, and get ready
to do some awesomeness.
	@arg packageid: Name of the package to unlink from the filesystem.
"""
ROOT_PATH = D.DB_ROOT
pkgdb = Syn.PackageRegistry.PackageRegistry(ROOT_PATH)
cruldb = Syn.PackageRegistry.CrulRegistry(ROOT_PATH)
try:
pkgid = cruldb.getPackage(packageid)
pkginf = pkgdb.getPackage(packageid).format()
Syn.Log.l(Syn.Log.PEDANTIC,"Package DB Dump: %s" % pkgid)
package_root = pkgid['path']
popdir = Syn.Common.getcwd()
Syn.Sh.cd(ROOT_PATH + package_root)
Syn.Sh.cd("./" + B.FS_ROOT)
tree = Syn.Common.getDirectoryTree()
supercool = {}
for t in tree:
supercool[t[1:]] = os.path.abspath(t)
crul = cruldb.getPackage(packageid)
crul_status = crul['status']
crul_path = crul['path']
if crul_status != "LINKED":
raise Syn.Exceptions.PackageNotinstalledException("Package not linked! -- " + packageid)
else:
Syn.Log.l(Syn.Log.PEDANTIC,"Package linked. unlinking.")
cruldb.setPackage(packageid, {
"status" : "HALF-LINKED",
"path" : crul_path
})
cruldb.write()
for s in supercool:
Syn.Log.l(Syn.Log.PEDANTIC,"Removing: %s" % s)
Syn.Sh.rm(C.CHROOT + s)
cruldb.setPackage(packageid, {
"status" : "INSTALLED",
"path" : crul_path
})
cruldb.write()
Syn.Sh.cd(popdir)
except Syn.Exceptions.PackageNotFoundException as e:
Syn.Log.l(Syn.Log.VERBOSE,"Shit. No package found. Passing exception up")
raise e
| gpl-3.0 | 8,327,458,023,312,325,000 | 23.581081 | 91 | 0.692688 | false | 2.751891 | false | false | false |
amd77/parker | matriculas/views.py | 1 | 1489 | # Create your views here.
from django.utils import timezone
# from django.views.generic import View, TemplateView, UpdateView
from django.views.generic import ListView, RedirectView
from models import Registro
from django.core.urlresolvers import reverse
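# Redirect the generic day view to the URL for today's date.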
class RedirectDia(RedirectView):
permanent = False
def get_redirect_url(self):
now = timezone.now()
return reverse("dia_ymd", args=[now.year, now.month, now.day])
class VistaDia(ListView):
model = Registro
template_name = "matriculas/dia.html"
def get_queryset(self):
self.year = int(self.kwargs["year"], 10)
self.month = int(self.kwargs["month"], 10)
self.day = int(self.kwargs.get("day", "0"), 10)
return Registro.coches_dia(self.year, self.month, self.day)
def get_context_data(self, **kwargs):
context = super(VistaDia, self).get_context_data(**kwargs)
context.update(Registro.estadisticas_dia(self.year, self.month, self.day))
return context
class VistaMes(ListView):
model = Registro
template_name = "matriculas/mes.html"
def get_queryset(self):
self.year = int(self.kwargs["year"], 10)
self.month = int(self.kwargs["month"], 10)
return Registro.coches_dia(self.year, self.month)
def get_context_data(self, **kwargs):
context = super(VistaMes, self).get_context_data(**kwargs)
context.update(Registro.estadisticas_mes(self.year, self.month))
return context
| gpl-2.0 | -4,901,623,654,913,660,000 | 32.088889 | 82 | 0.67495 | false | 3.3918 | false | false | false |
hylje/sankarit | sankarit/models/adventure.py | 1 | 5383 | # -*- encoding: utf-8 -*-
import itertools
import random
import datetime
from collections import defaultdict
from flask import g
from sankarit import itemclasses, adventureclasses
from sankarit.models.item import Item
class Adventure(object):
@classmethod
def create(cls, adventureclass, heroes):
c = g.db.cursor()
start_time = datetime.datetime.now()
end_time = start_time + adventureclass.timedelta
c.execute("""
INSERT INTO adventure (start_time, end_time, class, gold)
VALUES (%(start_time)s, %(end_time)s, %(class)s, %(gold)s)
RETURNING id
""", {
"start_time": start_time,
"end_time": end_time,
"class": adventureclass.id,
"gold": 0
})
aid, = c.fetchone()
# Create relation to all heroes
values = list(itertools.chain(*((hero.hid, aid) for hero in heroes)))
query = """
INSERT INTO adventure_hero (hero_id, adventure_id)
VALUES """ + ", ".join("(%s, %s)" for hero in heroes)
c.execute(query, values)
g.db.commit()
return cls(aid, start_time, end_time, adventureclass.id, 0, heroes=heroes, adventureclass=adventureclass)
def __init__(self, aid, start_time, end_time, adventureclass_id,
gold, heroes=None, adventureclass=None):
self.aid = aid
self.start_time = start_time
self.end_time = end_time
self.adventureclass_id = adventureclass_id
self.adventureclass = adventureclass or adventureclasses.get_adventureclass(adventureclass_id)
self.gold = gold
self.heroes = heroes or self.get_heroes()
def get_heroes(self):
from sankarit.models.hero import Hero
c = g.db.cursor()
c.execute("""
SELECT h.id as id, h.name as name, h.class as class, h.xp as xp, h.player_id as player_id
FROM hero h, adventure_hero ah
WHERE ah.adventure_id=%(aid)s AND h.id=ah.hero_id
""", {"aid": self.aid})
ret = []
for hero in c.fetchall():
ret.append(Hero(*hero))
return ret
def can_be_claimed(self):
if self.end_time < datetime.datetime.now() and self.gold == 0:
return True
else:
return False
def resolve_reward(self):
# XXX maybe split this into more functions
c = g.db.cursor()
offense = (sum(hero.offense() for hero in self.heroes)
* random.random() * 4
* (self.adventureclass.timedelta.total_seconds() / 2400))
defense = (sum(hero.defense() for hero in self.heroes)
* sum(hero.defense_factor() for hero in self.heroes)
* random.random() * 3
* (self.adventureclass.timedelta.total_seconds() / 2400))
success_rating = min(offense, defense*3/2) * 5
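        # Split the success rating randomly between loot and gold; XP is half of the rating.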
loot_ratio = random.random()
gold_ratio = 1 - loot_ratio
loot_rating = int(success_rating * loot_ratio)
gold_rating = int(success_rating * gold_ratio)
xp_rating = int(success_rating * 0.5)
c.execute("""
UPDATE adventure SET gold=%(gold)s WHERE id=%(aid)s
""", {"gold": gold_rating, "aid": self.aid})
level_total = sum(hero.get_level() for hero in self.heroes)
gold_per_player = defaultdict(int)
loot_per_player = defaultdict(int)
for hero in self.heroes:
contrib_ratio = hero.get_level() / level_total
gained_loot = contrib_ratio * loot_rating
gained_gold = contrib_ratio * gold_rating
gained_xp = contrib_ratio * xp_rating
c.execute("""
UPDATE hero SET xp=xp+%(gained_xp)s WHERE id=%(hero_id)s
""", {"gained_xp": gained_xp, "hero_id": hero.hid})
gold_per_player[hero.player_id] += gained_gold
loot_per_player[hero.player_id] += gained_loot
for player_id, gold_total in gold_per_player.iteritems():
c.execute("""
UPDATE player SET gold=gold+%(gold_total)s WHERE id=%(player_id)s
""", {"gold_total": gold_total, "player_id": player_id})
itemobjs = Item.generate(loot_per_player, self.heroes)
# commit the entire thing
g.db.commit()
return gold_per_player, itemobjs
def started_ago(self):
now = datetime.datetime.now()
if self.start_time > now:
return "Tulossa"
td = now - self.start_time
bits = [
(td.days, u"päivää"),
(td.seconds / 3600, u"tuntia"),
((td.seconds / 60) % 60, u"minuuttia"),
(td.seconds % 60, u"sekuntia")
]
valid_bits = [(time, text) for time, text in bits if time > 0]
if valid_bits:
valid_bits = valid_bits[:2]
return u", ".join(u"%s %s" % b for b in valid_bits) + u" sitten"
else:
return u"Juuri nyt"
def progress(self):
now = datetime.datetime.now()
if self.end_time < now:
return 1
if self.start_time > now:
return 0
factor = (self.end_time - now).total_seconds() / float((self.end_time - self.start_time).total_seconds())
complement = 1 - factor
percent = 100 * complement
return "%.04f" % percent
| bsd-3-clause | -7,869,746,268,539,831,000 | 30.461988 | 113 | 0.563755 | false | 3.453145 | false | false | false |
snakeleon/YouCompleteMe-x86 | third_party/ycmd/ycmd/handlers.py | 1 | 10839 | # Copyright (C) 2013 Google Inc.
# 2017 ycmd contributors
#
# This file is part of ycmd.
#
# ycmd is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ycmd is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ycmd. If not, see <http://www.gnu.org/licenses/>.
from __future__ import absolute_import
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import * # noqa
import bottle
import json
import logging
import platform
import sys
import time
import traceback
from bottle import request
from threading import Thread
import ycm_core
from ycmd import extra_conf_store, hmac_plugin, server_state, user_options_store
from ycmd.responses import ( BuildExceptionResponse, BuildCompletionResponse,
UnknownExtraConf )
from ycmd.request_wrap import RequestWrap
from ycmd.bottle_utils import SetResponseHeader
from ycmd.completers.completer_utils import FilterAndSortCandidatesWrap
# num bytes for the request body buffer; request.json only works if the request
# size is less than this
bottle.Request.MEMFILE_MAX = 10 * 1024 * 1024
_server_state = None
_hmac_secret = bytes()
_logger = logging.getLogger( __name__ )
app = bottle.Bottle()
wsgi_server = None
@app.post( '/event_notification' )
def EventNotification():
_logger.info( 'Received event notification' )
request_data = RequestWrap( request.json )
event_name = request_data[ 'event_name' ]
_logger.debug( 'Event name: %s', event_name )
event_handler = 'On' + event_name
getattr( _server_state.GetGeneralCompleter(), event_handler )( request_data )
filetypes = request_data[ 'filetypes' ]
response_data = None
if _server_state.FiletypeCompletionUsable( filetypes ):
response_data = getattr( _server_state.GetFiletypeCompleter( filetypes ),
event_handler )( request_data )
if response_data:
return _JsonResponse( response_data )
return _JsonResponse( {} )
@app.post( '/run_completer_command' )
def RunCompleterCommand():
_logger.info( 'Received command request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.OnUserCommand(
request_data[ 'command_arguments' ],
request_data ) )
@app.post( '/completions' )
def GetCompletions():
_logger.info( 'Received completion request' )
request_data = RequestWrap( request.json )
( do_filetype_completion, forced_filetype_completion ) = (
_server_state.ShouldUseFiletypeCompleter( request_data ) )
_logger.debug( 'Using filetype completion: %s', do_filetype_completion )
errors = None
completions = None
if do_filetype_completion:
try:
completions = ( _server_state.GetFiletypeCompleter(
request_data[ 'filetypes' ] )
.ComputeCandidates( request_data ) )
except Exception as exception:
if forced_filetype_completion:
# user explicitly asked for semantic completion, so just pass the error
# back
raise
else:
# store the error to be returned with results from the identifier
# completer
stack = traceback.format_exc()
_logger.error( 'Exception from semantic completer (using general): ' +
"".join( stack ) )
errors = [ BuildExceptionResponse( exception, stack ) ]
if not completions and not forced_filetype_completion:
completions = ( _server_state.GetGeneralCompleter()
.ComputeCandidates( request_data ) )
return _JsonResponse(
BuildCompletionResponse( completions if completions else [],
request_data.CompletionStartColumn(),
errors = errors ) )
@app.post( '/filter_and_sort_candidates' )
def FilterAndSortCandidates():
_logger.info( 'Received filter & sort request' )
  # Not using RequestWrap here because it isn't needed: these requests aren't
  # like the usual requests we handle.
request_data = request.json
return _JsonResponse( FilterAndSortCandidatesWrap(
request_data[ 'candidates'],
request_data[ 'sort_property' ],
request_data[ 'query' ] ) )
@app.get( '/healthy' )
def GetHealthy():
_logger.info( 'Received health request' )
if request.query.include_subservers:
cs_completer = _server_state.GetFiletypeCompleter( ['cs'] )
return _JsonResponse( cs_completer.ServerIsHealthy() )
return _JsonResponse( True )
@app.get( '/ready' )
def GetReady():
_logger.info( 'Received ready request' )
if request.query.subserver:
filetype = request.query.subserver
return _JsonResponse( _IsSubserverReady( filetype ) )
if request.query.include_subservers:
return _JsonResponse( _IsSubserverReady( 'cs' ) )
return _JsonResponse( True )
def _IsSubserverReady( filetype ):
completer = _server_state.GetFiletypeCompleter( [filetype] )
return completer.ServerIsReady()
@app.post( '/semantic_completion_available' )
def FiletypeCompletionAvailable():
_logger.info( 'Received filetype completion available request' )
return _JsonResponse( _server_state.FiletypeCompletionAvailable(
RequestWrap( request.json )[ 'filetypes' ] ) )
@app.post( '/defined_subcommands' )
def DefinedSubcommands():
_logger.info( 'Received defined subcommands request' )
completer = _GetCompleterForRequestData( RequestWrap( request.json ) )
return _JsonResponse( completer.DefinedSubcommands() )
@app.post( '/detailed_diagnostic' )
def GetDetailedDiagnostic():
_logger.info( 'Received detailed diagnostic request' )
request_data = RequestWrap( request.json )
completer = _GetCompleterForRequestData( request_data )
return _JsonResponse( completer.GetDetailedDiagnostic( request_data ) )
@app.post( '/load_extra_conf_file' )
def LoadExtraConfFile():
_logger.info( 'Received extra conf load request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Load( request_data[ 'filepath' ], force = True )
return _JsonResponse( True )
@app.post( '/ignore_extra_conf_file' )
def IgnoreExtraConfFile():
_logger.info( 'Received extra conf ignore request' )
request_data = RequestWrap( request.json, validate = False )
extra_conf_store.Disable( request_data[ 'filepath' ] )
return _JsonResponse( True )
@app.post( '/debug_info' )
def DebugInfo():
_logger.info( 'Received debug info request' )
request_data = RequestWrap( request.json )
has_clang_support = ycm_core.HasClangSupport()
clang_version = ycm_core.ClangVersion() if has_clang_support else None
filepath = request_data[ 'filepath' ]
try:
extra_conf_path = extra_conf_store.ModuleFileForSourceFile( filepath )
is_loaded = bool( extra_conf_path )
except UnknownExtraConf as error:
extra_conf_path = error.extra_conf_file
is_loaded = False
response = {
'python': {
'executable': sys.executable,
'version': platform.python_version()
},
'clang': {
'has_support': has_clang_support,
'version': clang_version
},
'extra_conf': {
'path': extra_conf_path,
'is_loaded': is_loaded
},
'completer': None
}
try:
response[ 'completer' ] = _GetCompleterForRequestData(
request_data ).DebugInfo( request_data )
except Exception as error:
_logger.exception( error )
return _JsonResponse( response )
@app.post( '/shutdown' )
def Shutdown():
_logger.info( 'Received shutdown request' )
ServerShutdown()
return _JsonResponse( True )
# The type of the param is Bottle.HTTPError
def ErrorHandler( httperror ):
body = _JsonResponse( BuildExceptionResponse( httperror.exception,
httperror.traceback ) )
hmac_plugin.SetHmacHeader( body, _hmac_secret )
return body
# For every error Bottle encounters it will use this as the default handler
app.default_error_handler = ErrorHandler
def _JsonResponse( data ):
SetResponseHeader( 'Content-Type', 'application/json' )
return json.dumps( data, default = _UniversalSerialize )
def _UniversalSerialize( obj ):
try:
serialized = obj.__dict__.copy()
serialized[ 'TYPE' ] = type( obj ).__name__
return serialized
except AttributeError:
return str( obj )
def _GetCompleterForRequestData( request_data ):
completer_target = request_data.get( 'completer_target', None )
if completer_target == 'identifier':
return _server_state.GetGeneralCompleter().GetIdentifierCompleter()
elif completer_target == 'filetype_default' or not completer_target:
return _server_state.GetFiletypeCompleter( request_data[ 'filetypes' ] )
else:
return _server_state.GetFiletypeCompleter( [ completer_target ] )
def ServerShutdown():
def Terminator():
if wsgi_server:
wsgi_server.Shutdown()
# Use a separate thread to let the server send the response before shutting
# down.
terminator = Thread( target = Terminator )
terminator.daemon = True
terminator.start()
def ServerCleanup():
if _server_state:
_server_state.Shutdown()
extra_conf_store.Shutdown()
def SetHmacSecret( hmac_secret ):
global _hmac_secret
_hmac_secret = hmac_secret
def UpdateUserOptions( options ):
global _server_state
if not options:
return
# This should never be passed in, but let's try to remove it just in case.
options.pop( 'hmac_secret', None )
user_options_store.SetAll( options )
_server_state = server_state.ServerState( options )
def SetServerStateToDefaults():
global _server_state, _logger
_logger = logging.getLogger( __name__ )
user_options_store.LoadDefaults()
_server_state = server_state.ServerState( user_options_store.GetAll() )
extra_conf_store.Reset()
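# Periodically poll the loaded filetype completers so their subservers are not shut down for inactivity.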
def KeepSubserversAlive( check_interval_seconds ):
def Keepalive( check_interval_seconds ):
while True:
time.sleep( check_interval_seconds )
_logger.debug( 'Keeping subservers alive' )
loaded_completers = _server_state.GetLoadedFiletypeCompleters()
for completer in loaded_completers:
completer.ServerIsHealthy()
keepalive = Thread( target = Keepalive,
args = ( check_interval_seconds, ) )
keepalive.daemon = True
keepalive.start()
| gpl-3.0 | -3,310,765,388,054,272,000 | 29.880342 | 80 | 0.696374 | false | 3.8341 | false | false | false |
tweemeterjop/thug | thug/DOM/W3C/Attr.py | 1 | 2167 | #!/usr/bin/env python
import bs4 as BeautifulSoup
from .Node import Node
class Attr(Node):
_value = ""
def __init__(self, doc, parent, attr):
self.doc = doc
self.parent = parent
self.attr = attr
self.tag = BeautifulSoup.Tag(parser = self.doc, name = 'attr')
Node.__init__(self, doc)
self._specified = False
self._value = self.getValue()
def __repr__(self):
return "<Attr object %s%s at 0x%08X>" % ("%s." % self.parent.tagName if self.parent else "", self.attr, id(self))
def __eq__(self, other):
return hasattr(other, "parent") and self.parent == other.parent and \
hasattr(other, "attr") and self.attr == other.attr
@property
def nodeType(self):
return Node.ATTRIBUTE_NODE
@property
def nodeName(self):
return self.attr
def getNodeValue(self):
return self.getValue()
def setNodeValue(self, value):
return self.setValue(value)
nodeValue = property(getNodeValue, setNodeValue)
@property
def childNodes(self):
from .NodeList import NodeList
return NodeList(self.parent.doc, [])
@property
def parentNode(self):
return self.parent
# Introduced in DOM Level 2
@property
def ownerElement(self):
if self.parent:
if self.parent.nodeType == Node.ELEMENT_NODE:
return self.parent
return None
@property
def ownerDocument(self):
return self.parent.doc
@property
def name(self):
return self.attr
@property
def specified(self):
if self.ownerElement is None:
return True
return self._specified
def getValue(self):
if self.parent:
if self.parent.tag.has_attr(self.attr):
self._specified = True
return self.parent.tag[self.attr]
return self._value
def setValue(self, value):
self._value = value
if self.parent:
self._specified = True
self.parent.tag[self.attr] = value
value = property(getValue, setValue)
| gpl-2.0 | -8,961,520,917,064,182,000 | 22.301075 | 121 | 0.580988 | false | 4.080979 | false | false | false |
foxdog-studios/pyddp | ddp/messages/client/sub_message.py | 1 | 1695 | # -*- coding: utf-8 -*-
# Copyright 2014 Foxdog Studios
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import copy
from .client_message import ClientMessage
__all__ = ['SubMessage']
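# DDP 'sub' message: a client request to subscribe to a named publication, with optional parameters.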
class SubMessage(ClientMessage):
def __init__(self, id, name, params=None):
super(SubMessage, self).__init__()
self._id = id
self._name = name
self._params = copy(params)
def __eq__(self, other):
if isinstance(other, SubMessage):
return (self._id == other._id and self._name == other._name
and self._params == other._params)
return super(ClientMessage, self).__eq__(other)
def __str__(self):
return 'SubMessage({!r}, {!r}, params={!r})'.format(
self._id,
self._name,
self._params)
@property
def id(self):
return self._id
@property
def name(self):
return self._name
@property
def params(self):
return copy(self._params)
def has_params(self):
return self._params is not None
| apache-2.0 | 7,933,461,572,024,796,000 | 26.786885 | 74 | 0.630088 | false | 4.035714 | false | false | false |
KDNT/p2pool-worldcoin-old | p2pool/web.py | 1 | 25333 | from __future__ import division
import errno
import json
import os
import sys
import time
import traceback
from twisted.internet import defer
from twisted.python import log
from twisted.web import resource, static
import p2pool
from bitcoin import data as bitcoin_data
from . import data as p2pool_data, p2p
from util import deferral, deferred_resource, graph, math, memory, pack, variable
def _atomic_read(filename):
try:
with open(filename, 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
try:
with open(filename + '.new', 'rb') as f:
return f.read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
return None
def _atomic_write(filename, data):
with open(filename + '.new', 'wb') as f:
f.write(data)
f.flush()
try:
os.fsync(f.fileno())
except:
pass
try:
os.rename(filename + '.new', filename)
except: # XXX windows can't overwrite
os.remove(filename)
os.rename(filename + '.new', filename)
def get_web_root(wb, datadir_path, bitcoind_warning_var, stop_event=variable.Event()):
node = wb.node
start_time = time.time()
web_root = resource.Resource()
def get_users():
height, last = node.tracker.get_height_and_last(node.best_share_var.value)
weights, total_weight, donation_weight = node.tracker.get_cumulative_weights(node.best_share_var.value, min(height, 720), 65535*2**256)
res = {}
for script in sorted(weights, key=lambda s: weights[s]):
res[bitcoin_data.script2_to_address(script, node.net.PARENT)] = weights[script]/total_weight
return res
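    # Scale the current payouts so they total `scale`; outputs below `trunc` are folded into one weighted-random winner.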
def get_current_scaled_txouts(scale, trunc=0):
txouts = node.get_current_txouts()
total = sum(txouts.itervalues())
results = dict((script, value*scale//total) for script, value in txouts.iteritems())
if trunc > 0:
total_random = 0
random_set = set()
for s in sorted(results, key=results.__getitem__):
if results[s] >= trunc:
break
total_random += results[s]
random_set.add(s)
if total_random:
winner = math.weighted_choice((script, results[script]) for script in random_set)
for script in random_set:
del results[script]
results[winner] = total_random
if sum(results.itervalues()) < int(scale):
results[math.weighted_choice(results.iteritems())] += int(scale) - sum(results.itervalues())
return results
def get_patron_sendmany(total=None, trunc='0.01'):
if total is None:
return 'need total argument. go to patron_sendmany/<TOTAL>'
total = int(float(total)*1e8)
trunc = int(float(trunc)*1e8)
return json.dumps(dict(
(bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8)
for script, value in get_current_scaled_txouts(total, trunc).iteritems()
if bitcoin_data.script2_to_address(script, node.net.PARENT) is not None
))
def get_global_stats():
# averaged over last hour
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
nonstale_hash_rate = p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)
stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
return dict(
pool_nonstale_hash_rate=nonstale_hash_rate,
pool_hash_rate=nonstale_hash_rate/(1 - stale_prop),
pool_stale_prop=stale_prop,
min_difficulty=bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target),
)
def get_local_stats():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.tracker.get_height(node.best_share_var.value), 3600//node.net.SHARE_PERIOD)
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
my_unstale_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes)
my_orphan_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'orphan')
my_doa_count = sum(1 for share in node.tracker.get_chain(node.best_share_var.value, lookbehind) if share.hash in wb.my_share_hashes and share.share_data['stale_info'] == 'doa')
my_share_count = my_unstale_count + my_orphan_count + my_doa_count
my_stale_count = my_orphan_count + my_doa_count
my_stale_prop = my_stale_count/my_share_count if my_share_count != 0 else None
my_work = sum(bitcoin_data.target_to_average_attempts(share.target)
for share in node.tracker.get_chain(node.best_share_var.value, lookbehind - 1)
if share.hash in wb.my_share_hashes)
actual_time = (node.tracker.items[node.best_share_var.value].timestamp -
node.tracker.items[node.tracker.get_nth_parent_hash(node.best_share_var.value, lookbehind - 1)].timestamp)
share_att_s = my_work / actual_time
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
return dict(
my_hash_rates_in_last_hour=dict(
note="DEPRECATED",
nonstale=share_att_s,
rewarded=share_att_s/(1 - global_stale_prop),
actual=share_att_s/(1 - my_stale_prop) if my_stale_prop is not None else 0, # 0 because we don't have any shares anyway
),
my_share_counts_in_last_hour=dict(
shares=my_share_count,
unstale_shares=my_unstale_count,
stale_shares=my_stale_count,
orphan_stale_shares=my_orphan_count,
doa_stale_shares=my_doa_count,
),
my_stale_proportions_in_last_hour=dict(
stale=my_stale_prop,
orphan_stale=my_orphan_count/my_share_count if my_share_count != 0 else None,
dead_stale=my_doa_count/my_share_count if my_share_count != 0 else None,
),
miner_hash_rates=miner_hash_rates,
miner_dead_hash_rates=miner_dead_hash_rates,
efficiency_if_miner_perfect=(1 - stale_orphan_shares/shares)/(1 - global_stale_prop) if shares else None, # ignores dead shares because those are miner's fault and indicated by pseudoshare rejection
efficiency=(1 - (stale_orphan_shares+stale_doa_shares)/shares)/(1 - global_stale_prop) if shares else None,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
shares=dict(
total=shares,
orphan=stale_orphan_shares,
dead=stale_doa_shares,
),
uptime=time.time() - start_time,
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
warnings=p2pool_data.get_warnings(node.tracker, node.best_share_var.value, node.net, bitcoind_warning_var.value, node.bitcoind_work.value),
donation_proportion=wb.donation_percentage/100,
version=p2pool.__version__,
protocol_version=p2p.Protocol.VERSION,
fee_private=wb.worker_fee,
)
class WebInterface(deferred_resource.DeferredResource):
def __init__(self, func, mime_type='application/json', args=()):
deferred_resource.DeferredResource.__init__(self)
self.func, self.mime_type, self.args = func, mime_type, args
def getChild(self, child, request):
return WebInterface(self.func, self.mime_type, self.args + (child,))
@defer.inlineCallbacks
def render_GET(self, request):
request.setHeader('Content-Type', self.mime_type)
request.setHeader('Access-Control-Allow-Origin', '*')
res = yield self.func(*self.args)
defer.returnValue(json.dumps(res) if self.mime_type == 'application/json' else res)
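    # WebInterface wraps a callable as a Twisted resource: extra URL path
    # segments are accumulated via getChild() and passed to the callable as
    # positional arguments, and the result is JSON-encoded unless another
    # MIME type is given (e.g. 'text/plain' for patron_sendmany below).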
def decent_height():
return min(node.tracker.get_height(node.best_share_var.value), 720)
web_root.putChild('rate', WebInterface(lambda: p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, decent_height())/(1-p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, decent_height()))))
web_root.putChild('difficulty', WebInterface(lambda: bitcoin_data.target_to_difficulty(node.tracker.items[node.best_share_var.value].max_target)))
web_root.putChild('users', WebInterface(get_users))
web_root.putChild('user_stales', WebInterface(lambda: dict((bitcoin_data.pubkey_hash_to_address(ph, node.net.PARENT), prop) for ph, prop in
p2pool_data.get_user_stale_props(node.tracker, node.best_share_var.value, node.tracker.get_height(node.best_share_var.value)).iteritems())))
web_root.putChild('fee', WebInterface(lambda: wb.worker_fee))
web_root.putChild('current_payouts', WebInterface(lambda: dict((bitcoin_data.script2_to_address(script, node.net.PARENT), value/1e8) for script, value in node.get_current_txouts().iteritems())))
web_root.putChild('patron_sendmany', WebInterface(get_patron_sendmany, 'text/plain'))
web_root.putChild('global_stats', WebInterface(get_global_stats))
web_root.putChild('local_stats', WebInterface(get_local_stats))
web_root.putChild('peer_addresses', WebInterface(lambda: ' '.join('%s%s' % (peer.transport.getPeer().host, ':'+str(peer.transport.getPeer().port) if peer.transport.getPeer().port != node.net.P2P_PORT else '') for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('peer_txpool_sizes', WebInterface(lambda: dict(('%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port), peer.remembered_txs_size) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('pings', WebInterface(defer.inlineCallbacks(lambda: defer.returnValue(
dict([(a, (yield b)) for a, b in
[(
'%s:%i' % (peer.transport.getPeer().host, peer.transport.getPeer().port),
defer.inlineCallbacks(lambda peer=peer: defer.returnValue(
min([(yield peer.do_ping().addCallback(lambda x: x/0.001).addErrback(lambda fail: None)) for i in xrange(3)])
))()
) for peer in list(node.p2p_node.peers.itervalues())]
])
))))
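    # The 'pings' resource above pings each connected peer three times and
    # reports the fastest round-trip per peer in milliseconds (None if a ping
    # attempt fails).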
web_root.putChild('peer_versions', WebInterface(lambda: dict(('%s:%i' % peer.addr, peer.other_sub_version) for peer in node.p2p_node.peers.itervalues())))
web_root.putChild('payout_addr', WebInterface(lambda: bitcoin_data.pubkey_hash_to_address(wb.my_pubkey_hash, node.net.PARENT)))
web_root.putChild('recent_blocks', WebInterface(lambda: [dict(
ts=s.timestamp,
hash='%064x' % s.header_hash,
number=pack.IntType(24).unpack(s.share_data['coinbase'][1:4]) if len(s.share_data['coinbase']) >= 4 else None,
share='%064x' % s.hash,
) for s in node.tracker.get_chain(node.best_share_var.value, min(node.tracker.get_height(node.best_share_var.value), 24*60*60//node.net.SHARE_PERIOD)) if s.pow_hash <= s.header['bits'].target]))
web_root.putChild('uptime', WebInterface(lambda: time.time() - start_time))
web_root.putChild('stale_rates', WebInterface(lambda: p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, decent_height(), rates=True)))
new_root = resource.Resource()
web_root.putChild('web', new_root)
stat_log = []
if os.path.exists(os.path.join(datadir_path, 'stats')):
try:
with open(os.path.join(datadir_path, 'stats'), 'rb') as f:
stat_log = json.loads(f.read())
except:
log.err(None, 'Error loading stats:')
def update_stat_log():
while stat_log and stat_log[0]['time'] < time.time() - 24*60*60:
stat_log.pop(0)
lookbehind = 3600//node.net.SHARE_PERIOD
if node.tracker.get_height(node.best_share_var.value) < lookbehind:
return None
global_stale_prop = p2pool_data.get_average_stale_prop(node.tracker, node.best_share_var.value, lookbehind)
(stale_orphan_shares, stale_doa_shares), shares, _ = wb.get_stale_counts()
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
stat_log.append(dict(
time=time.time(),
pool_hash_rate=p2pool_data.get_pool_attempts_per_second(node.tracker, node.best_share_var.value, lookbehind)/(1-global_stale_prop),
pool_stale_prop=global_stale_prop,
local_hash_rates=miner_hash_rates,
local_dead_hash_rates=miner_dead_hash_rates,
shares=shares,
stale_shares=stale_orphan_shares + stale_doa_shares,
stale_shares_breakdown=dict(orphan=stale_orphan_shares, doa=stale_doa_shares),
current_payout=node.get_current_txouts().get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8,
peers=dict(
incoming=sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming),
outgoing=sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming),
),
attempts_to_share=bitcoin_data.target_to_average_attempts(node.tracker.items[node.best_share_var.value].max_target),
attempts_to_block=bitcoin_data.target_to_average_attempts(node.bitcoind_work.value['bits'].target),
block_value=node.bitcoind_work.value['subsidy']*1e-8,
))
with open(os.path.join(datadir_path, 'stats'), 'wb') as f:
f.write(json.dumps(stat_log))
x = deferral.RobustLoopingCall(update_stat_log)
x.start(5*60)
stop_event.watch(x.stop)
new_root.putChild('log', WebInterface(lambda: stat_log))
def get_share(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return None
share = node.tracker.items[int(share_hash_str, 16)]
return dict(
parent='%064x' % share.previous_hash,
children=['%064x' % x for x in sorted(node.tracker.reverse.get(share.hash, set()), key=lambda sh: -len(node.tracker.reverse.get(sh, set())))], # sorted from most children to least children
type_name=type(share).__name__,
local=dict(
verified=share.hash in node.tracker.verified.items,
time_first_seen=start_time if share.time_seen == 0 else share.time_seen,
peer_first_received_from=share.peer_addr,
),
share_data=dict(
timestamp=share.timestamp,
target=share.target,
max_target=share.max_target,
payout_address=bitcoin_data.script2_to_address(share.new_script, node.net.PARENT),
donation=share.share_data['donation']/65535,
stale_info=share.share_data['stale_info'],
nonce=share.share_data['nonce'],
desired_version=share.share_data['desired_version'],
absheight=share.absheight,
abswork=share.abswork,
),
block=dict(
hash='%064x' % share.header_hash,
header=dict(
version=share.header['version'],
previous_block='%064x' % share.header['previous_block'],
merkle_root='%064x' % share.header['merkle_root'],
timestamp=share.header['timestamp'],
target=share.header['bits'].target,
nonce=share.header['nonce'],
),
gentx=dict(
hash='%064x' % share.gentx_hash,
coinbase=share.share_data['coinbase'].ljust(2, '\x00').encode('hex'),
value=share.share_data['subsidy']*1e-8,
),
txn_count=len(list(share.iter_transaction_hash_refs())),
),
)
new_root.putChild('share', WebInterface(lambda share_hash_str: get_share(share_hash_str)))
new_root.putChild('heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.heads]))
new_root.putChild('verified_heads', WebInterface(lambda: ['%064x' % x for x in node.tracker.verified.heads]))
new_root.putChild('tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.tails for x in node.tracker.reverse.get(t, set())]))
new_root.putChild('verified_tails', WebInterface(lambda: ['%064x' % x for t in node.tracker.verified.tails for x in node.tracker.verified.reverse.get(t, set())]))
new_root.putChild('best_share_hash', WebInterface(lambda: '%064x' % node.best_share_var.value))
def get_share_data(share_hash_str):
if int(share_hash_str, 16) not in node.tracker.items:
return ''
share = node.tracker.items[int(share_hash_str, 16)]
return p2pool_data.share_type.pack(share.as_share1a())
new_root.putChild('share_data', WebInterface(lambda share_hash_str: get_share_data(share_hash_str), 'application/octet-stream'))
new_root.putChild('currency_info', WebInterface(lambda: dict(
symbol=node.net.PARENT.SYMBOL,
block_explorer_url_prefix=node.net.PARENT.BLOCK_EXPLORER_URL_PREFIX,
address_explorer_url_prefix=node.net.PARENT.ADDRESS_EXPLORER_URL_PREFIX,
)))
new_root.putChild('version', WebInterface(lambda: p2pool.__version__))
hd_path = os.path.join(datadir_path, 'graph_db')
hd_data = _atomic_read(hd_path)
hd_obj = {}
if hd_data is not None:
try:
hd_obj = json.loads(hd_data)
except Exception:
log.err(None, 'Error reading graph database:')
dataview_descriptions = {
'last_hour': graph.DataViewDescription(150, 60*60),
'last_day': graph.DataViewDescription(300, 60*60*24),
'last_week': graph.DataViewDescription(300, 60*60*24*7),
'last_month': graph.DataViewDescription(300, 60*60*24*30),
'last_year': graph.DataViewDescription(300, 60*60*24*365.25),
}
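    # Each DataViewDescription appears to take (bin count, window length in
    # seconds), so e.g. 'last_hour' keeps 150 bins covering 3600 s (24 s/bin).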
def build_desired_rates(ds_name, ds_desc, dv_name, dv_desc, obj):
if not obj:
last_bin_end = 0
bins = dv_desc.bin_count*[{}]
else:
pool_rates = obj['pool_rates'][dv_name]
desired_versions = obj['desired_versions'][dv_name]
def get_total_pool_rate(t):
n = int((pool_rates['last_bin_end'] - t)/dv_desc.bin_width)
if n < 0 or n >= dv_desc.bin_count:
return None
total = sum(x[0] for x in pool_rates['bins'][n].values())
count = math.mean(x[1] for x in pool_rates['bins'][n].values())
if count == 0:
return None
return total/count
last_bin_end = desired_versions['last_bin_end']
bins = [dict((name, (total*get_total_pool_rate(last_bin_end - (i+1/2)*dv_desc.bin_width), count)) for name, (total, count) in desired_versions['bins'][i].iteritems()) for i in xrange(dv_desc.bin_count)]
return graph.DataView(dv_desc, ds_desc, last_bin_end, bins)
hd = graph.HistoryDatabase.from_obj({
'local_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'local_dead_share_hash_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False),
'pool_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'current_payout': graph.DataStreamDescription(dataview_descriptions),
'current_payouts': graph.DataStreamDescription(dataview_descriptions, multivalues=True),
'incoming_peers': graph.DataStreamDescription(dataview_descriptions),
'outgoing_peers': graph.DataStreamDescription(dataview_descriptions),
'miner_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'miner_dead_hash_rates': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'desired_versions': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True),
'desired_version_rates': graph.DataStreamDescription(dataview_descriptions, multivalues=True,
multivalue_undefined_means_0=True, default_func=build_desired_rates),
'traffic_rate': graph.DataStreamDescription(dataview_descriptions, is_gauge=False, multivalues=True),
'getwork_latency': graph.DataStreamDescription(dataview_descriptions),
'memory_usage': graph.DataStreamDescription(dataview_descriptions),
}, hd_obj)
x = deferral.RobustLoopingCall(lambda: _atomic_write(hd_path, json.dumps(hd.to_obj())))
x.start(100)
stop_event.watch(x.stop)
@wb.pseudoshare_received.watch
def _(work, dead, user):
t = time.time()
hd.datastreams['local_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_hash_rate'].add_datum(t, work)
if user is not None:
hd.datastreams['miner_hash_rates'].add_datum(t, {user: work})
if dead:
hd.datastreams['miner_dead_hash_rates'].add_datum(t, {user: work})
@wb.share_received.watch
def _(work, dead):
t = time.time()
hd.datastreams['local_share_hash_rate'].add_datum(t, work)
if dead:
hd.datastreams['local_dead_share_hash_rate'].add_datum(t, work)
@node.p2p_node.traffic_happened.watch
def _(name, bytes):
hd.datastreams['traffic_rate'].add_datum(time.time(), {name: bytes})
def add_point():
if node.tracker.get_height(node.best_share_var.value) < 10:
return None
lookbehind = min(node.net.CHAIN_LENGTH, 60*60//node.net.SHARE_PERIOD, node.tracker.get_height(node.best_share_var.value))
t = time.time()
pool_rates = p2pool_data.get_stale_counts(node.tracker, node.best_share_var.value, lookbehind, rates=True)
pool_total = sum(pool_rates.itervalues())
hd.datastreams['pool_rates'].add_datum(t, pool_rates)
current_txouts = node.get_current_txouts()
hd.datastreams['current_payout'].add_datum(t, current_txouts.get(bitcoin_data.pubkey_hash_to_script2(wb.my_pubkey_hash), 0)*1e-8)
miner_hash_rates, miner_dead_hash_rates = wb.get_local_rates()
current_txouts_by_address = dict((bitcoin_data.script2_to_address(script, node.net.PARENT), amount) for script, amount in current_txouts.iteritems())
hd.datastreams['current_payouts'].add_datum(t, dict((user, current_txouts_by_address[user]*1e-8) for user in miner_hash_rates if user in current_txouts_by_address))
hd.datastreams['incoming_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if peer.incoming))
hd.datastreams['outgoing_peers'].add_datum(t, sum(1 for peer in node.p2p_node.peers.itervalues() if not peer.incoming))
vs = p2pool_data.get_desired_version_counts(node.tracker, node.best_share_var.value, lookbehind)
vs_total = sum(vs.itervalues())
hd.datastreams['desired_versions'].add_datum(t, dict((str(k), v/vs_total) for k, v in vs.iteritems()))
hd.datastreams['desired_version_rates'].add_datum(t, dict((str(k), v/vs_total*pool_total) for k, v in vs.iteritems()))
try:
hd.datastreams['memory_usage'].add_datum(t, memory.resident())
except:
if p2pool.DEBUG:
traceback.print_exc()
x = deferral.RobustLoopingCall(add_point)
x.start(5)
stop_event.watch(x.stop)
@node.bitcoind_work.changed.watch
def _(new_work):
hd.datastreams['getwork_latency'].add_datum(time.time(), new_work['latency'])
new_root.putChild('graph_data', WebInterface(lambda source, view: hd.datastreams[source].dataviews[view].get_data(time.time())))
web_root.putChild('static', static.File(os.path.join(os.path.dirname(sys.argv[0]), 'web-static')))
return web_root
| gpl-3.0 | 3,808,312,413,652,431,000 | 54.922737 | 260 | 0.6306 | false | 3.469799 | false | false | false |
d1m0/pyelf | __init__.py | 1 | 13904 | from pylibelf import *
from pylibelf.types import *
from pylibelf.iterators import *
from pylibelf.constants import *
from pylibelf.util import *
from pylibelf.util.syms import *
from pylibelf.macros import *
from bisect import bisect_left
import pylibelf.util
import pylibelf
import types
import os
def _inrange(x, a,b):
return x>=a and x < b
def _overlap(a, b, c, d):
return a <= d and c <= b
class Bunch:
def __setitem__(self, k, v): self.__dict__[k] = v
def __getitem__(self, k): return self.__dict__[k]
class BaseElfNode(object):
@staticmethod
def extract(obj):
return BaseElfNode._extract(obj, {})
@staticmethod
def _extract(obj, m):
""" Given a BaseElfNode object extract a static snapshot of the current
object and its children that does not refer to the parent or any pylibelf
objects
"""
if isinstance(obj, BaseElfNode):
if obj in m:
return m[obj]
res = Bunch()
m[obj] = res
for attr in dir(obj):
if (isinstance(obj, ElfSym) and attr == 'contents' and not obj.defined):
v = None
elif (isinstance(obj, ElfScn) and (attr == 'info_scn' or attr == 'link_scn' or attr == 'index')):
try:
v = getattr(obj, attr)
except ElfError: # This section doesn't have a info_scn or a link_scn
v = None
else:
v = getattr(obj, attr)
if hasattr(v, "__call__"):
# This is a function - ignore
continue
try:
res[attr] = BaseElfNode._extract(v, m)
except AttributeError: pass
return res
elif type(obj) == list:
return map(lambda x: BaseElfNode._extract(x, m), obj)
elif type(obj) == tuple:
return tuple(map(lambda x: BaseElfNode._extract(x, m), obj))
elif type(obj) == dict:
      return dict([(BaseElfNode._extract(k, m), BaseElfNode._extract(v, m)) for (k,v) in obj.items()])
elif type(obj) in [int, str, long, bool, types.NoneType]:
return obj
else:
print type(obj), obj
return None
def __init__(self, elf, pt, obj, typ = None, addFields = []):
assert(pt == None or isinstance(pt, BaseElfNode))
self._elf = elf
self._pt = pt
self._obj = obj
self._ptr = cast(self._obj, c_void_p).value
self._typ = typ
# All object's memoization cache points to the root elf file's memoization cache
if (isinstance(self, Elf)):
self._cache = {}
else:
while (not isinstance(pt, Elf)): pt = pt._pt
self._cache = pt._cache
self._fields = []
if self._typ != None:
self._fields += map(lambda x: x[0], self._typ._fields_)
self._fields += addFields
def _select(self, name): return select(self._elf, name)
def __getattr__(self, name):
cache = self._cache
key = (self._ptr, name)
if (key in cache):
return cache[key]
res = self._getattr_impl(name)
if (isinstance(res, types.GeneratorType)):
cache[key] = list(res)
else:
cache[key] = res
return res
def _getattr_impl(self, name):
try:
if (self._obj != None):
inner = self._obj.contents
else:
return 0
except AttributeError:
raise Exception("Can't access %s in %s - not a pointer" % \
(name, str(self._obj)))
return getattr(inner, name)
def _getelf(self):
p = self
while not isinstance(p, Elf):
p = p._pt
return p
def _class(self):
return pylibelf.util._class(self._elf)
def __dir__(self):
return self._fields
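# BaseElfNode resolves attributes lazily through _getattr_impl() and memoizes
# the results in a cache shared with the root Elf object, keyed on
# (underlying C pointer, attribute name); generators are expanded to lists
# before caching so repeated accesses are cheap.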
class ElfEhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Ehdr if is64(elf) else Elf32_Ehdr, [])
class ElfShdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Shdr if is64(elf) else Elf32_Shdr, ['name'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt._pt.ehdr.e_shstrndx, self._obj.contents.sh_name)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfSym(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj,
Elf64_Sym if is64(elf) else Elf32_Sym, ['name', 'section', 'defined', \
'contents', 'type', 'binding', 'targetScn', 'index'])
def _getattr_impl(self, name):
if (name == "name"):
return elf_strptr(self._elf, self._pt.shdr.sh_link, self._obj.contents.st_name)
elif (name == "section"):
return self._pt
elif (name == "defined"):
return self.st_shndx != SHN_UNDEF
elif (name == "type"):
if is64(self._elf):
return ELF64_ST_TYPE(self.st_info)
else:
return ELF32_ST_TYPE(self.st_info)
elif (name == "binding"):
if is64(self._elf):
return ELF64_ST_BIND(self.st_info)
else:
return ELF32_ST_BIND(self.st_info)
elif (name == "targetScn"):
return self._pt._pt.section(self.st_shndx)
elif (name == "contents"):
targetSec = self._pt._pt.section(self.st_shndx)
relas = []
for relaScn in targetSec.relaScns:
# [self.st_value ...
start = bisect_left(relaScn.relas, self.st_value)
# ... self.st_value + self.st_size)
end = bisect_left(relaScn.relas, self.st_value + self.st_size)
relas.extend(relaScn.relas[start:end])
# Testing only
#for r in relas:
# assert(r.r_offset >= self.st_value and r.r_offset < self.st_value + self.st_size)
#TODO: rels
rels = []
mem = targetSec.memInRange(self.st_value, self.st_size)
return (mem, rels, relas)
elif (name == "index"):
size = sizeof(self._typ)
ptr = cast(self._obj, c_voidp).value
ind = None
for d in self.section.data():
if d.d_buf <= ptr and d.d_buf + d.d_size > ptr:
assert (ptr - d.d_buf) % size == 0, "Misaligned symbol pointer %d in section %s" % \
(ptr, self.section.shdr.name)
ind = (ptr - d.d_buf) / size
assert ind != None, "Symbol not found in section!"
return ind
else:
return BaseElfNode._getattr_impl(self, name)
class ElfRela(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rela if is64(elf) else Elf32_Rela, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
def __cmp__(self, other):
if type(other) == long or type(other) == int:
if self.r_offset < other:
return -1
elif self.r_offset == other:
return 0
else:
return 1
raise Exception("NYI")
class ElfRel(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, \
Elf64_Rel if is64(elf) else Elf32_Rel, ['sym'])
def _getattr_impl(self, name):
if (name == "sym"):
elfO = self._getelf()
scn = elfO.section(self._pt.shdr.sh_link)
symInd = ELF64_R_SYM(self.r_info) if is64(self._elf) else \
ELF32_R_SYM(self.r_info)
return ElfSym(self._elf, scn, scn.sym(symInd)._obj)
else:
return BaseElfNode._getattr_impl(self, name)
class ElfData(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Data, [])
class ElfArhdr(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Arhdr, [])
class ElfScn(BaseElfNode):
def __init__(self, elf, pt, obj):
BaseElfNode.__init__(self, elf, pt, obj, Elf_Scn,\
['index', 'shdr', 'link_scn', 'info_scn', 'syms', 'relas', 'relaScns', 'sym', 'data', 'memInRange',
'relasInRange', 'strAtAddr'])
def _getattr_impl(self, name):
if (name == "index"):
return elf_ndxscn(self._obj)
elif (name == "shdr"):
return ElfShdr(self._elf, self, select(self._elf, 'getshdr')(self._obj))
elif (name == "link_scn" and self.shdr.sh_link != SHN_UNDEF):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_link))
elif (name == "info_scn" and (self.shdr.sh_type == SHT_REL or \
self.shdr.sh_type == SHT_RELA)):
return ElfScn(self._elf, self._pt, elf_getscn(self._elf, \
self.shdr.sh_info))
elif (name == "syms" and self.shdr.sh_type in [SHT_SYMTAB, SHT_DYNSYM]):
symT = Elf32_Sym if (is32(self._elf)) else Elf64_Sym
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda sym: ElfSym(self._elf, self, pointer(sym)), \
list(arr_iter(d, symT))), list(data(self._obj))))
elif (name == "relas" and self.shdr.sh_type == SHT_RELA):
relaT = Elf32_Rela if (is32(self._elf)) else Elf64_Rela
return reduce(lambda a,c: a+c, \
map(lambda d: map(lambda rela: ElfRela(self._elf, self, pointer(rela)),\
list(arr_iter(d, relaT))), list(data(self._obj))))
elif (name == "relaScns"):
return [s for s in self._pt.sections if s.shdr.sh_info == self.index\
and s.shdr.sh_type == SHT_RELA]
elif (name == "name"):
return self.shdr.name
else:
return BaseElfNode._getattr_impl(self, name)
def sym(self, ind):
shtype = self.shdr.sh_type
if shtype not in [SHT_SYMTAB, SHT_DYNSYM]:
raise Exception("Section %s does not contain symbols" % (self.shdr.name,))
return self.syms[ind]
def data(self):
d = None
while True:
d = elf_getdata(self._obj, d)
if not bool(d): break
yield ElfData(self._elf, self, d)
def memInRange(self, start, size):
r = ''
off = 0
base = self.shdr.sh_addr
end = start + size
for d in self.data():
if start >= end: break;
off = base + d.d_off
if start >= off and start < off + d.d_size:
c = cast(d.d_buf, POINTER(c_char))
l = min(off + d.d_size, end) - start
r += c[start- off : start - off + l]
start += l
return r
  def relasInRange(self, start, size):
    relas = []
    end = start + size
    for relaScn in self.relaScns:
      # relocations with r_offset in [start ... start + size)
      lo = bisect_left(relaScn.relas, start)
      hi = bisect_left(relaScn.relas, end)
      relas.extend(relaScn.relas[lo:hi])
    return relas
def strAtAddr(self, ptr):
r = ''
off = 0
base = self.shdr.sh_addr
start = ptr - base
for d in self.data():
off = d.d_off
c = cast(d.d_buf, POINTER(c_char))
while (start >= off and start < off + d.d_size):
if c[start] == '\x00':
break
r += c[start]
start += 1
return r
class Elf(BaseElfNode):
def __init__(self, elf, pt=None, claz = None):
if type(elf) == str:
self.fd = os.open(elf, os.O_RDONLY)
elf = elf_begin(self.fd, ELF_C_READ, None)
elif isinstance(elf, ElfP):
self.fd = None
else:
raise Exception("Invalid input to Elf.__init__(): %s" % (str(elf), ))
if claz != None:
self._class = claz
else:
self._class = pylibelf.util._class(elf)
BaseElfNode.__init__(self, elf, pt, elf, pylibelf.types.Elf, \
['ehdr', 'shstrndx', 'arhdr', 'sections', 'section', 'syms', 'findSym'])
self._symsMap = dict([
(sym.name, sym) for sym in self.syms()
])
self._secMap = dict([
(elf_ndxscn(s._obj), s) for s in self.sections
])
nullScn = ElfScn(self._elf, self, None)
self._secMap[0] = nullScn
def finalize(self):
elf_end(self._elf)
if self.fd != None:
os.close(self.fd)
def _getattr_impl(self, name):
if (name == "ehdr"):
return ElfEhdr(self._elf, self, self._select("getehdr")(self._elf))
elif (name == "shstrndx"):
return self.ehdr.e_shstrndx
elif (name == "arhdr"):
arhdr = elf_getarhdr(self._elf)
if (bool(arhdr)):
return ElfArhdr(self._elf, self, arhdr)
else:
raise AttributeError("Elf file doesn't have an arhdr")
elif (name == "sections"):
return [ ElfScn(self._elf, self, pointer(s)) for s in
sections(self._elf) ]
elif (name == "relasMap"):
return dict([(s.index, s.relas) \
for s in self.sections if s.shdr.sh_type == SHT_RELA])
else:
return BaseElfNode._getattr_impl(self, name)
def section(self, ind):
return self._secMap[ind]
def syms(self):
for scn in self.sections:
if scn.shdr.sh_type != SHT_SYMTAB and scn.shdr.sh_type != SHT_DYNSYM:
continue
for sym in syms(self._elf, scn._obj.contents):
yield ElfSym(self._elf, scn, pointer(sym[1]))
def findSym(self, name):
try:
return self._symsMap[name]
except:
return None
def deref(self, addr, size):
r = None
for s in self.sections:
# TODO(dbounov): Hack, due to .tbss overlapping other sections. Figure out correct way to deal with this.
if s.shdr.name == ".tbss":
continue
if _overlap(addr, addr+size - 1, s.shdr.sh_addr, s.shdr.sh_addr + s.shdr.sh_size - 1):
assert r == None # Currently support address ranges in a single section only
r = (s.memInRange(addr, size), [], s.relasInRange(addr, size) )
return r
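# Illustrative usage (file name is hypothetical):
#
#   elf = Elf('libfoo.so')
#   sym = elf.findSym('main')
#   if sym is not None and sym.defined:
#     mem, rels, relas = sym.contents   # raw bytes plus relocations
#   elf.finalize()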
class Ar:
def __init__(self, fname, claz):
self._fname = fname
self._class = claz
def elfs(self):
self.fd = os.open(self._fname, os.O_RDONLY)
ar = elf_begin(self.fd, ELF_C_READ, None)
while True:
e = elf_begin(self.fd, ELF_C_READ, ar)
if (not bool(e)): break
yield Elf(e, None, self._class)
elf_end(ar)
os.close(self.fd)
__all__ = [ 'BaseElfNode', 'ElfEhdr', 'ElfShdr', 'ElfSym', 'ElfRela', 'ElfRel', \
    'ElfData', 'ElfArhdr', 'ElfScn', 'Elf', 'Ar' ]
| mit | -9,088,902,695,502,797,000 | 29.095238 | 111 | 0.583357 | false | 2.980493 | false | false | false |
bburan/psiexperiment | psi/data/plots.py | 1 | 35393 | import logging
log = logging.getLogger(__name__)
import itertools
import importlib
from functools import partial
from collections import defaultdict
import numpy as np
import pandas as pd
import pyqtgraph as pg
from atom.api import (Unicode, Float, Tuple, Int, Typed, Property, Atom, Bool,
Enum, List, Dict, Callable, Value)
from enaml.application import deferred_call, timed_call
from enaml.colors import parse_color
from enaml.core.api import Looper, Declarative, d_, d_func
from enaml.qt.QtGui import QColor
from psi.util import SignalBuffer, ConfigurationException
from psi.core.enaml.api import load_manifests, PSIContribution
from psi.controller.calibration import util
from psi.context.context_item import ContextMeta
################################################################################
# Utility functions
################################################################################
def get_x_fft(fs, duration):
n_time = int(fs * duration)
freq = np.fft.rfftfreq(n_time, fs**-1)
return np.log10(freq)
def get_color_cycle(name):
module_name, cmap_name = name.rsplit('.', 1)
module = importlib.import_module(module_name)
cmap = getattr(module, cmap_name)
return itertools.cycle(cmap.colors)
def make_color(color):
if isinstance(color, tuple):
return QColor(*color)
elif isinstance(color, str):
return QColor(color)
else:
raise ValueError('Unknown color %r', color)
################################################################################
# Style mixins
################################################################################
class ColorCycleMixin(Declarative):
#: Define the pen color cycle. Can be a list of colors or a string
#: indicating the color palette to use in palettable.
pen_color_cycle = d_(Typed(object))
_plot_colors = Typed(dict)
def _make_plot_cycle(self):
if isinstance(self.pen_color_cycle, str):
cycle = get_color_cycle(self.pen_color_cycle)
else:
cycle = itertools.cycle(self.pen_color_cycle)
return defaultdict(lambda: next(cycle))
@d_func
def get_pen_color(self, key):
if self._plot_colors is None:
self._plot_colors = self._make_plot_cycle()
color = self._plot_colors[key]
if not isinstance(color, str):
return QColor(*color)
else:
return QColor(color)
def _observe_pen_color_cycle(self, event):
self._plot_colors = self._make_plot_cycle()
self.reset_plots()
def reset_plots(self):
raise NotImplementedError
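# pen_color_cycle accepts either an explicit list of colors (names or RGB
# tuples) or a string naming a palettable colormap, e.g.
# 'palettable.colorbrewer.qualitative.Set1_9' (example name; any module path
# whose object exposes a .colors attribute should work with get_color_cycle).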
################################################################################
# Supporting classes
################################################################################
class BaseDataRange(Atom):
container = Typed(object)
# Size of display window
span = Float(1)
# Delay before clearing window once data has "scrolled off" the window.
delay = Float(0)
# Current visible data range
current_range = Tuple(Float(), Float())
def add_source(self, source):
cb = partial(self.source_added, source=source)
source.add_callback(cb)
def _default_current_range(self):
return 0, self.span
def _observe_delay(self, event):
self._update_range()
def _observe_span(self, event):
self._update_range()
def _update_range(self):
raise NotImplementedError
class EpochDataRange(BaseDataRange):
max_duration = Float()
def source_added(self, data, source):
n = [len(d['signal']) for d in data]
max_duration = max(n) / source.fs
self.max_duration = max(max_duration, self.max_duration)
def _observe_max_duration(self, event):
self._update_range()
def _update_range(self):
self.current_range = 0, self.max_duration
class ChannelDataRange(BaseDataRange):
# Automatically updated. Indicates last "seen" time based on all data
# sources reporting to this range.
current_time = Float(0)
current_samples = Typed(defaultdict, (int,))
current_times = Typed(defaultdict, (float,))
def _observe_current_time(self, event):
self._update_range()
def _update_range(self):
low_value = (self.current_time//self.span)*self.span - self.delay
high_value = low_value+self.span
self.current_range = low_value, high_value
def add_event_source(self, source):
cb = partial(self.event_source_added, source=source)
source.add_callback(cb)
def source_added(self, data, source):
self.current_samples[source] += data.shape[-1]
self.current_times[source] = self.current_samples[source]/source.fs
self.current_time = max(self.current_times.values())
def event_source_added(self, data, source):
self.current_times[source] = data[-1][1]
self.current_time = max(self.current_times.values())
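# ChannelDataRange advances the visible window in whole-span steps: once the
# newest sample time crosses a span boundary, current_range jumps to the next
# window, shifted back by `delay` seconds so data that has just scrolled in is
# not clipped at the right edge.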
def create_container(children, x_axis=None):
container = pg.GraphicsLayout()
container.setSpacing(10)
# Add the x and y axes to the layout, along with the viewbox.
for i, child in enumerate(children):
container.addItem(child.y_axis, i, 0)
container.addItem(child.viewbox, i, 1)
if x_axis is not None:
container.addItem(x_axis, i+1, 1)
# Link the child viewboxes together
for child in children[1:]:
child.viewbox.setXLink(children[0].viewbox)
#children[0].viewbox.setXRange(0, 100, padding=0)
return container
################################################################################
# Pattern containers
################################################################################
class MultiPlotContainer(Looper, PSIContribution):
group = d_(Unicode())
containers = d_(Dict())
_workbench = Value()
selected_item = Value()
def refresh_items(self):
super().refresh_items()
if not self.iterable:
return
self.containers = {str(i): c[0].container for \
i, c in zip(self.iterable, self.items)}
load_manifests(self.items, self._workbench)
for item in self.items:
load_manifests(item, self._workbench)
load_manifests(item[0].children, self._workbench)
deferred_call(item[0].format_container)
################################################################################
# Containers (defines a shared set of containers across axes)
################################################################################
class BasePlotContainer(PSIContribution):
label = d_(Unicode())
container = Typed(pg.GraphicsWidget)
x_axis = Typed(pg.AxisItem)
base_viewbox = Property()
legend = Typed(pg.LegendItem)
def _default_container(self):
return create_container(self.children, self.x_axis)
def _default_legend(self):
legend = pg.LegendItem()
legend.setParentItem(self.container)
return legend
def _get_base_viewbox(self):
return self.children[0].viewbox
def _default_x_axis(self):
x_axis = pg.AxisItem('bottom')
x_axis.setGrid(64)
x_axis.linkToView(self.children[0].viewbox)
return x_axis
def update(self, event=None):
pass
def find(self, name):
for child in self.children:
if child.name == name:
return child
def format_container(self):
pass
def _reset_plots(self):
pass
class PlotContainer(BasePlotContainer):
x_min = d_(Float(0))
x_max = d_(Float(0))
def format_container(self):
# If we want to specify values relative to a psi context variable, we
# cannot do it when initializing the plots.
if (self.x_min != 0) or (self.x_max != 0):
self.base_viewbox.setXRange(self.x_min, self.x_max, padding=0)
def update(self, event=None):
deferred_call(self.format_container)
class BaseTimeContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same time-based X-axis
'''
data_range = Typed(BaseDataRange)
span = d_(Float(1))
delay = d_(Float(0.25))
def _default_container(self):
container = super()._default_container()
# Ensure that the x axis shows the planned range
self.base_viewbox.setXRange(0, self.span, padding=0)
self.data_range.observe('current_range', self.update)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Time', unitPrefix='sec.')
return x_axis
def update(self, event=None):
low, high = self.data_range.current_range
deferred_call(self.base_viewbox.setXRange, low, high, padding=0)
super().update()
class TimeContainer(BaseTimeContainer):
def _default_data_range(self):
return ChannelDataRange(container=self, span=self.span,
delay=self.delay)
def update(self, event=None):
for child in self.children:
child.update()
super().update()
class EpochTimeContainer(BaseTimeContainer):
def _default_data_range(self):
return EpochDataRange(container=self, span=self.span, delay=self.delay)
def format_log_ticks(values, scale, spacing):
values = 10**np.array(values).astype(np.float)
return ['{:.1f}'.format(v) for v in values]
class FFTContainer(BasePlotContainer):
'''
Contains one or more viewboxes that share the same frequency-based X-axis
'''
freq_lb = d_(Float(5))
freq_ub = d_(Float(50000))
def _default_container(self):
container = super()._default_container()
self.base_viewbox.setXRange(np.log10(self.freq_lb),
np.log10(self.freq_ub),
padding=0)
return container
def _default_x_axis(self):
x_axis = super()._default_x_axis()
x_axis.setLabel('Frequency (Hz)')
x_axis.logTickStrings = format_log_ticks
x_axis.setLogMode(True)
return x_axis
################################################################################
# ViewBox
################################################################################
class ViewBox(PSIContribution):
viewbox = Typed(pg.ViewBox)
y_axis = Typed(pg.AxisItem)
y_mode = d_(Enum('symmetric', 'upper'))
y_min = d_(Float(0))
y_max = d_(Float(0))
allow_zoom_y = d_(Bool(True))
allow_zoom_x = d_(Bool(False))
data_range = Property()
def _default_name(self):
return self.label
def _get_data_range(self):
return self.parent.data_range
def _default_y_axis(self):
y_axis = pg.AxisItem('left')
y_axis.setLabel(self.label)
y_axis.linkToView(self.viewbox)
y_axis.setGrid(64)
return y_axis
def _default_viewbox(self):
viewbox = pg.ViewBox(enableMenu=False)
viewbox.setMouseEnabled(x=False, y=True)
viewbox.setBackgroundColor('w')
if (self.y_min != 0) or (self.y_max != 0):
viewbox.disableAutoRange()
viewbox.setYRange(self.y_min, self.y_max)
for child in self.children:
for plot in child.get_plots():
viewbox.addItem(plot)
return viewbox
def update(self, event=None):
for child in self.children:
child.update()
def add_plot(self, plot, label=None):
self.viewbox.addItem(plot)
if label:
self.parent.legend.addItem(plot, label)
def plot(self, x, y, color='k', log_x=False, log_y=False, label=None,
kind='line'):
'''
Convenience function used by plugins
This is typically used in post-processing routines to add static plots
to existing view boxes.
'''
if log_x:
x = np.log10(x)
if log_y:
y = np.log10(y)
x = np.asarray(x)
y = np.asarray(y)
m = np.isfinite(x) & np.isfinite(y)
x = x[m]
y = y[m]
if kind == 'line':
item = pg.PlotCurveItem(pen=pg.mkPen(color))
elif kind == 'scatter':
item = pg.ScatterPlotItem(pen=pg.mkPen(color))
item.setData(x, y)
self.add_plot(item)
if label is not None:
self.parent.legend.addItem(item, label)
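# Example (sketch): given a ViewBox instance `vb` obtained from a workbench
# plugin, static results can be overlaid with vb.plot(x, y, color='r',
# log_x=True) or, for points, kind='scatter'; non-finite values are masked out
# before the item is added to the view.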
################################################################################
# Plots
################################################################################
class BasePlot(PSIContribution):
# Make this weak-referenceable so we can bind methods to Qt slots.
__slots__ = '__weakref__'
source_name = d_(Unicode())
source = Typed(object)
label = d_(Unicode())
def update(self, event=None):
pass
def _reset_plots(self):
pass
################################################################################
# Single plots
################################################################################
class SinglePlot(BasePlot):
pen_color = d_(Typed(object))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
label = d_(Unicode())
pen = Typed(object)
plot = Typed(object)
def get_plots(self):
return [self.plot]
def _default_pen_color(self):
return 'black'
def _default_pen(self):
color = make_color(self.pen_color)
return pg.mkPen(color, width=self.pen_width)
def _default_name(self):
return self.source_name + '_plot'
class ChannelPlot(SinglePlot):
downsample = Int(0)
decimate_mode = d_(Enum('extremes', 'mean'))
_cached_time = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_channel_plot'
def _default_plot(self):
return pg.PlotCurveItem(pen=self.pen, antialias=self.antialias)
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_source(self.source)
self.parent.data_range.observe('span', self._update_time)
self.source.add_callback(self._append_data)
self.parent.viewbox.sigResized.connect(self._update_decimation)
self._update_time(None)
self._update_decimation(self.parent.viewbox)
def _update_time(self, event):
# Precompute the time array since this can be the "slow" point
# sometimes in computations
n = round(self.parent.data_range.span*self.source.fs)
self._cached_time = np.arange(n)/self.source.fs
self._update_decimation()
self._update_buffer()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs,
self.parent.data_range.span*2)
def _update_decimation(self, viewbox=None):
try:
width, _ = self.parent.viewbox.viewPixelSize()
dt = self.source.fs**-1
self.downsample = round(width/dt/2)
except Exception as e:
pass
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def update(self, event=None):
low, high = self.parent.data_range.current_range
data = self._buffer.get_range_filled(low, high, np.nan)
t = self._cached_time[:len(data)] + low
if self.downsample > 1:
t = t[::self.downsample]
if self.decimate_mode == 'extremes':
d_min, d_max = decimate_extremes(data, self.downsample)
t = t[:len(d_min)]
x = np.c_[t, t].ravel()
y = np.c_[d_min, d_max].ravel()
if x.shape == y.shape:
deferred_call(self.plot.setData, x, y, connect='pairs')
elif self.decimate_mode == 'mean':
d = decimate_mean(data, self.downsample)
t = t[:len(d)]
if t.shape == d.shape:
deferred_call(self.plot.setData, t, d)
else:
t = t[:len(data)]
deferred_call(self.plot.setData, t, data)
def _reshape_for_decimate(data, downsample):
# Determine the "fragment" size that we are unable to decimate. A
# downsampling factor of 5 means that we perform the operation in chunks of
# 5 samples. If we have only 13 samples of data, then we cannot decimate
# the last 3 samples and will simply discard them.
last_dim = data.ndim
offset = data.shape[-1] % downsample
if offset > 0:
data = data[..., :-offset]
shape = (len(data), -1, downsample) if data.ndim == 2 else (-1, downsample)
return data.reshape(shape)
def decimate_mean(data, downsample):
# If data is empty, return imediately
if data.size == 0:
return np.array([]), np.array([])
data = _reshape_for_decimate(data, downsample).copy()
return data.mean(axis=-1)
def decimate_extremes(data, downsample):
# If data is empty, return imediately
if data.size == 0:
return np.array([]), np.array([])
# Force a copy to be made, which speeds up min()/max(). Apparently min/max
# make a copy of a reshaped array before performing the operation, so we
# force it now so the copy only occurs once.
data = _reshape_for_decimate(data, downsample).copy()
return data.min(axis=-1), data.max(axis=-1)
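# Worked example (illustrative): with downsample=4 and 18 samples, the final 2
# samples are dropped, the rest reshape to (4, 4), and decimate_extremes
# returns per-chunk min and max (4 values each) that ChannelPlot interleaves
# and draws with connect='pairs' as vertical min/max segments.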
class FFTChannelPlot(ChannelPlot):
time_span = d_(Float(1))
window = d_(Enum('hamming', 'flattop'))
_x = Typed(np.ndarray)
_buffer = Typed(SignalBuffer)
def _default_name(self):
return self.source_name + '_fft_plot'
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._append_data)
self.source.observe('fs', self._cache_x)
self._update_buffer()
self._cache_x()
def _update_buffer(self, event=None):
self._buffer = SignalBuffer(self.source.fs, self.time_span)
def _append_data(self, data):
self._buffer.append_data(data)
self.update()
def _cache_x(self, event=None):
if self.source.fs:
self._x = get_x_fft(self.source.fs, self.time_span)
def update(self, event=None):
if self._buffer.get_time_ub() >= self.time_span:
data = self._buffer.get_latest(-self.time_span, 0)
#psd = util.patodb(util.psd(data, self.source.fs, self.window))
psd = util.psd(data, self.source.fs, self.window)
spl = self.source.calibration.get_spl(self._x, psd)
deferred_call(self.plot.setData, self._x, spl)
class BaseTimeseriesPlot(SinglePlot):
rect_center = d_(Float(0.5))
rect_height = d_(Float(1))
fill_color = d_(Typed(object))
brush = Typed(object)
_rising = Typed(list, ())
_falling = Typed(list, ())
def _default_brush(self):
return pg.mkBrush(self.fill_color)
def _default_plot(self):
plot = pg.QtGui.QGraphicsPathItem()
plot.setPen(self.pen)
plot.setBrush(self.brush)
return plot
def update(self, event=None):
lb, ub = self.parent.data_range.current_range
current_time = self.parent.data_range.current_time
starts = self._rising
ends = self._falling
if len(starts) == 0 and len(ends) == 1:
starts = [0]
elif len(starts) == 1 and len(ends) == 0:
ends = [current_time]
elif len(starts) > 0 and len(ends) > 0:
if starts[0] > ends[0]:
starts = np.r_[0, starts]
if starts[-1] > ends[-1]:
ends = np.r_[ends, current_time]
try:
epochs = np.c_[starts, ends]
except ValueError as e:
log.exception(e)
log.warning('Unable to update %r, starts shape %r, ends shape %r',
self, starts, ends)
return
m = ((epochs >= lb) & (epochs < ub)) | np.isnan(epochs)
epochs = epochs[m.any(axis=-1)]
path = pg.QtGui.QPainterPath()
y_start = self.rect_center - self.rect_height*0.5
for x_start, x_end in epochs:
x_width = x_end-x_start
r = pg.QtCore.QRectF(x_start, y_start, x_width, self.rect_height)
path.addRect(r)
deferred_call(self.plot.setPath, path)
class EventPlot(BaseTimeseriesPlot):
event = d_(Unicode())
def _observe_event(self, event):
if self.event is not None:
self.parent.data_range.observe('current_time', self.update)
def _default_name(self):
return self.event + '_timeseries'
def _append_data(self, bound, timestamp):
if bound == 'start':
self._rising.append(timestamp)
elif bound == 'end':
self._falling.append(timestamp)
self.update()
class TimeseriesPlot(BaseTimeseriesPlot):
source_name = d_(Unicode())
source = Typed(object)
def _default_name(self):
return self.source_name + '_timeseries'
def _observe_source(self, event):
if self.source is not None:
self.parent.data_range.add_event_source(self.source)
self.parent.data_range.observe('current_time', self.update)
self.source.add_callback(self._append_data)
def _append_data(self, data):
for (etype, value) in data:
if etype == 'rising':
self._rising.append(value)
elif etype == 'falling':
self._falling.append(value)
################################################################################
# Group plots
################################################################################
class GroupMixin(ColorCycleMixin):
source = Typed(object)
group_meta = d_(Unicode())
groups = d_(Typed(ContextMeta))
group_names = d_(List())
#: Function that takes the epoch metadata and decides whether to accept it
#: for plotting. Useful to reduce the number of plots shown on a graph.
group_filter = d_(Callable())
#: Function that takes the epoch metadata and returns a key indicating
#: which group it should included in for plotting.
group_color_key = d_(Callable())
pen_width = d_(Int(0))
antialias = d_(Bool(False))
plots = Dict()
_data_cache = Typed(object)
_data_count = Typed(object)
_data_updated = Typed(object)
_data_n_samples = Typed(object)
_pen_color_cycle = Typed(object)
_plot_colors = Typed(object)
_x = Typed(np.ndarray)
n_update = d_(Int(1))
def _default_group_names(self):
return [p.name for p in self.groups.values]
def _default_group_filter(self):
return lambda key: True
def _default_group_color_key(self):
return lambda key: tuple(key[g] for g in self.group_names)
def get_pen_color(self, key):
kw_key = {n: k for n, k in zip(self.group_names, key)}
group_key = self.group_color_key(kw_key)
return super().get_pen_color(group_key)
def reset_plots(self):
# Clear any existing plots and reset color cycle
        for plot in self.plots.values():
self.parent.viewbox.removeItem(plot)
self.plots = {}
self._data_cache = defaultdict(list)
self._data_count = defaultdict(int)
self._data_updated = defaultdict(int)
self._data_n_samples = defaultdict(int)
def _observe_groups(self, event):
self.groups.observe('values', self._update_groups)
self._update_groups()
def _update_groups(self, event=None):
self.reset_plots()
self.group_names = [p.name for p in self.groups.values]
if self.source is not None:
self.update()
def get_plots(self):
return []
def _make_new_plot(self, key):
log.info('Adding plot for key %r', key)
try:
pen_color = self.get_pen_color(key)
pen = pg.mkPen(pen_color, width=self.pen_width)
plot = pg.PlotCurveItem(pen=pen, antialias=self.antialias)
deferred_call(self.parent.viewbox.addItem, plot)
self.plots[key] = plot
except KeyError as key_error:
key = key_error.args[0]
m = f'Cannot update plot since a field, {key}, ' \
'required by the plot is missing.'
raise ConfigurationException(m) from key_error
def get_plot(self, key):
if key not in self.plots:
self._make_new_plot(key)
return self.plots[key]
class EpochGroupMixin(GroupMixin):
duration = Float()
def _y(self, epoch):
return np.mean(epoch, axis=0) if len(epoch) \
else np.full_like(self._x, np.nan)
def _update_duration(self, event=None):
self.duration = self.source.duration
def _epochs_acquired(self, epochs):
for d in epochs:
md = d['info']['metadata']
if self.group_filter(md):
signal = d['signal']
key = tuple(md[n] for n in self.group_names)
self._data_cache[key].append(signal)
self._data_count[key] += 1
# Track number of samples
n = max(self._data_n_samples[key], len(signal))
self._data_n_samples[key] = n
# Does at least one epoch need to be updated?
for key, count in self._data_count.items():
if count >= self._data_updated[key] + self.n_update:
n = max(self._data_n_samples.values())
self.duration = n / self.source.fs
self.update()
break
def _observe_source(self, event):
if self.source is not None:
self.source.add_callback(self._epochs_acquired)
self.source.observe('duration', self._update_duration)
self.source.observe('fs', self._cache_x)
self.observe('duration', self._cache_x)
self._reset_plots()
self._cache_x()
def update(self, event=None):
# Update epochs that need updating
todo = []
for key, count in list(self._data_count.items()):
if count >= self._data_updated[key] + self.n_update:
data = self._data_cache[key]
plot = self.get_plot(key)
y = self._y(data)
todo.append((plot.setData, self._x, y))
self._data_updated[key] = len(data)
def update():
for setter, x, y in todo:
setter(x, y)
deferred_call(update)
class GroupedEpochAveragePlot(EpochGroupMixin, BasePlot):
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.duration:
n_time = round(self.source.fs * self.duration)
self._x = np.arange(n_time)/self.source.fs
def _default_name(self):
return self.source_name + '_grouped_epoch_average_plot'
def _observe_source(self, event):
super()._observe_source(event)
if self.source is not None:
self.parent.data_range.add_source(self.source)
class GroupedEpochFFTPlot(EpochGroupMixin, BasePlot):
def _default_name(self):
return self.source_name + '_grouped_epoch_fft_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return self.source.calibration.get_spl(self._x, util.psd(y, self.source.fs))
class GroupedEpochPhasePlot(EpochGroupMixin, BasePlot):
unwrap = d_(Bool(True))
def _default_name(self):
return self.source_name + '_grouped_epoch_phase_plot'
def _cache_x(self, event=None):
# Cache the frequency points. Must be in units of log for PyQtGraph.
# TODO: This could be a utility function stored in the parent?
if self.source.fs and self.duration:
self._x = get_x_fft(self.source.fs, self.duration)
def _y(self, epoch):
y = np.mean(epoch, axis=0) if epoch else np.full_like(self._x, np.nan)
return util.phase(y, self.source.fs, unwrap=self.unwrap)
class StackedEpochAveragePlot(EpochGroupMixin, BasePlot):
_offset_update_needed = Bool(False)
def _make_new_plot(self, key):
super()._make_new_plot(key)
self._offset_update_needed = True
def _update_offsets(self, vb=None):
vb = self.parent.viewbox
height = vb.height()
n = len(self.plots)
for i, (_, plot) in enumerate(sorted(self.plots.items())):
offset = (i+1) * height / (n+1)
point = vb.mapToView(pg.Point(0, offset))
plot.setPos(0, point.y())
def _cache_x(self, event=None):
# Set up the new time axis
if self.source.fs and self.source.duration:
n_time = round(self.source.fs * self.source.duration)
self._x = np.arange(n_time)/self.source.fs
def update(self):
super().update()
if self._offset_update_needed:
deferred_call(self._update_offsets)
self._offset_update_needed = False
def _reset_plots(self):
#super()._reset_plots()
self.parent.viewbox \
.sigRangeChanged.connect(self._update_offsets)
self.parent.viewbox \
.sigRangeChangedManually.connect(self._update_offsets)
################################################################################
# Simple plotters
################################################################################
class ResultPlot(SinglePlot):
x_column = d_(Unicode())
y_column = d_(Unicode())
average = d_(Bool())
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
data_filter = d_(Callable())
_data_cache = Typed(list)
def _default_data_filter(self):
# By default, accept all data points
return lambda x: True
def _default_name(self):
return '.'.join((self.parent.name, self.source_name, 'result_plot',
self.x_column, self.y_column))
def _observe_source(self, event):
if self.source is not None:
self._data_cache = []
self.source.add_callback(self._data_acquired)
def _data_acquired(self, data):
update = False
for d in data:
if self.data_filter(d):
x = d[self.x_column]
y = d[self.y_column]
self._data_cache.append((x, y))
update = True
if update:
self.update()
def update(self, event=None):
if not self._data_cache:
return
x, y = zip(*self._data_cache)
x = np.array(x)
y = np.array(y)
if self.average:
d = pd.DataFrame({'x': x, 'y': y}).groupby('x')['y'].mean()
x = d.index.values
y = d.values
deferred_call(self.plot.setData, x, y)
def _default_plot(self):
symbol_code = self.SYMBOL_MAP[self.symbol]
color = QColor(self.pen_color)
pen = pg.mkPen(color, width=self.pen_width)
brush = pg.mkBrush(color)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
class DataFramePlot(ColorCycleMixin, PSIContribution):
data = d_(Typed(pd.DataFrame))
x_column = d_(Unicode())
y_column = d_(Unicode())
grouping = d_(List(Unicode()))
_plot_cache = Dict()
SYMBOL_MAP = {
'circle': 'o',
'square': 's',
'triangle': 't',
'diamond': 'd',
}
symbol = d_(Enum('circle', 'square', 'triangle', 'diamond'))
symbol_size = d_(Float(10))
symbol_size_unit = d_(Enum('screen', 'data'))
pen_width = d_(Float(0))
antialias = d_(Bool(False))
def _default_name(self):
return '.'.join((self.parent.name, 'result_plot'))
def _observe_x_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_y_column(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_grouping(self, event):
self.reset_plots()
self._observe_data(event)
def _observe_data(self, event):
if self.data is None:
return
if self.x_column not in self.data:
return
if self.y_column not in self.data:
return
todo = []
if self.grouping:
try:
for group, values in self.data.groupby(self.grouping):
if group not in self._plot_cache:
self._plot_cache[group] = self._default_plot(group)
x = values[self.x_column].values
y = values[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[group], x[i], y[i]))
except KeyError as e:
# This is likely triggered when grouping updates an analysis
# before it's ready.
log.warning(e)
return
else:
if None not in self._plot_cache:
self._plot_cache[None] = self._default_plot(None)
x = self.data[self.x_column].values
y = self.data[self.y_column].values
i = np.argsort(x)
todo.append((self._plot_cache[None], x[i], y[i]))
def update():
nonlocal todo
for plot, x, y in todo:
plot.setData(x, y)
deferred_call(update)
def _default_plot(self, group):
symbol_code = self.SYMBOL_MAP[self.symbol]
color = self.get_pen_color(group)
brush = pg.mkBrush(color)
pen = pg.mkPen(color, width=self.pen_width)
plot = pg.PlotDataItem(pen=pen,
antialias=self.antialias,
symbol=symbol_code,
symbolSize=self.symbol_size,
symbolPen=pen,
symbolBrush=brush,
pxMode=self.symbol_size_unit=='screen')
deferred_call(self.parent.add_plot, plot, self.label)
return plot
def reset_plots(self):
for plot in self._plot_cache.values():
deferred_call(self.parent.viewbox.removeItem, plot)
self._plot_cache = {}
def get_plots(self):
return list(self._plot_cache.values())
| mit | 3,576,317,524,640,551,400 | 30.685765 | 84 | 0.559376 | false | 3.785348 | false | false | false |
sdemircan/editobj2 | field_qtopia.py | 1 | 9247 | # -*- coding: utf-8 -*-
# field_qtopia.py
# Copyright (C) 2007-2008 Jean-Baptiste LAMY -- [email protected]
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import editobj2
from editobj2.field import *
from editobj2.field import _WithButtonField, _RangeField, _ShortEnumField, _LongEnumField
import qt
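# LineEdit: a QLineEdit that calls the supplied on_validate callback when Return is pressed
# and again when the widget loses focus.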
class LineEdit(qt.QLineEdit):
def __init__(self, master, on_validate):
qt.QLineEdit.__init__(self, master)
self.on_validate = on_validate
self.connect(self, qt.SIGNAL("returnPressed()"), self.on_validate)
def focusOutEvent(self, e):
qt.QLineEdit.focusOutEvent(self, e)
self.on_validate()
class QtopiaField(MultiGUIField):
y_flags = 0
class QtopiaEntryField(QtopiaField, EntryField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = LineEdit(master.q, self.validate)
super(QtopiaEntryField, self).__init__(gui, master, o, attr, undo_stack)
self.timer = None
self.update()
def validate(self):
print "validate"
s = unicode(self.q.text())
if s != self.old_str:
self.old_str = s
self.set_value(s)
def update(self):
self.updating = 1
try:
self.old_str = self.get_value()
self.q.setText(self.old_str)
finally: self.updating = 0
class QtopiaIntField (QtopiaEntryField, IntField): pass # XXX no "spin-button" since they don't allow entering e.g. "1 + 2" as an integer !
class QtopiaFloatField (QtopiaEntryField, FloatField): pass
class QtopiaStringField(QtopiaEntryField, StringField): pass
class QtopiaPasswordField(QtopiaStringField, PasswordField):
def __init__(self, gui, master, o, attr, undo_stack):
QtopiaStringField.__init__(self, gui, master, o, attr, undo_stack)
self.q.setEchoMode(qt.QLineEdit.Password)
class QtopiaBoolField(QtopiaField, BoolField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QCheckBox(" ", master.q)
super(QtopiaBoolField, self).__init__(gui, master, o, attr, undo_stack)
self.update()
self.q.connect(self.q, qt.SIGNAL("stateChanged(int)"), self.validate)
def validate(self, state):
v = self.descr.get(self.o, self.attr)
if state == 1: self.q.setTristate(0)
elif state == 0:
if isinstance(v, int): self.set_value(0)
else: self.set_value(False)
else:
if isinstance(v, int): self.set_value(1)
else: self.set_value(True)
def update(self):
self.updating = 1
try:
v = self.descr.get(self.o, self.attr)
if v is introsp.NonConsistent:
self.q.setTristate(1)
self.q.setNoChange()
else:
self.q.setChecked(v)
finally:
self.updating = 0
class QtopiaProgressBarField(QtopiaField, ProgressBarField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QProgressBar(master.q)
super(ProgressBarField, self).__init__(gui, master, o, attr, undo_stack)
self.update()
def update(self):
v = self.get_value()
if v is introsp.NonConsistent: self.q.setTotalSteps(0)
else: self.q.setTotalSteps(100); self.q.setProgress(int(v * 100))
class QtopiaEditButtonField(QtopiaField, EditButtonField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QPushButton(editobj2.TRANSLATOR(u"Edit..."), master.q)
super(QtopiaEditButtonField, self).__init__(gui, master, o, attr, undo_stack)
self.q.setAutoDefault(0)
self.q.connect(self.q, qt.SIGNAL("clicked()"), self.on_click)
self.update()
def update(self):
self.q.setEnabled(not self.get_value() is None)
class Qtopia_WithButtonField(QtopiaField, _WithButtonField):
def __init__(self, gui, master, o, attr, undo_stack, Field, button_text, on_button):
self.q = qt.QHBox(master.q)
super(Qtopia_WithButtonField, self).__init__(gui, master, o, attr, undo_stack, Field, button_text, on_button)
button = qt.QPushButton(editobj2.TRANSLATOR(button_text), self.q)
button.setAutoDefault(0)
button.connect(button, qt.SIGNAL("clicked()"), on_button)
class QtopiaWithButtonStringField(QtopiaField, WithButtonStringField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QHBox(master.q)
super(QtopiaWithButtonStringField, self).__init__(gui, master, o, attr, undo_stack)
button = qt.QPushButton(editobj2.TRANSLATOR(self.button_text), self.q)
button.setAutoDefault(0)
button.connect(button, qt.SIGNAL("clicked()"), self.on_button)
class QtopiaFilenameField(QtopiaWithButtonStringField, FilenameField):
def on_button(self):
import editobj2.qtopia_file_chooser
editobj2.qtopia_file_chooser.ask_filename(self.string_field.set_value, self.string_field.get_value())
class QtopiaDirnameField(QtopiaWithButtonStringField, DirnameField):
def on_button(self):
import editobj2.qtopia_file_chooser
editobj2.qtopia_file_chooser.ask_dirname(self.string_field.set_value, self.string_field.get_value())
class QtopiaURLField(QtopiaWithButtonStringField, URLField):
def on_button(self):
import webbrowser
webbrowser.open_new(self.get_value())
class QtopiaTextField(QtopiaField, TextField):
y_flags = 1
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QMultiLineEdit(master.q)
super(QtopiaTextField, self).__init__(gui, master, o, attr, undo_stack)
self.q.connect(self.q, qt.SIGNAL("textChanged()"), self.validate)
self.update()
def validate(self):
s = unicode(self.q.text())
self.set_value(s)
def update(self):
self.updating = 1
try:
self.old_str = self.get_value()
if self.q.text() != self.old_str:
self.q.setText(self.old_str)
finally: self.updating = 0
class QtopiaObjectAttributeField(QtopiaField, ObjectAttributeField):
def __init__(self, gui, master, o, attr, undo_stack):
self.q = qt.QHBox(master.q)
super(QtopiaObjectAttributeField, self).__init__(gui, master, o, attr, undo_stack)
self.q.setFrameShape (qt.QFrame.Box)
self.q.setFrameShadow(qt.QFrame.Sunken)
self.q.setMargin(5)
class Qtopia_RangeField(QtopiaField, _RangeField):
def __init__(self, gui, master, o, attr, undo_stack, min, max, incr = 1):
self.q = qt.QHBox(master.q)
self.q.setSpacing(5)
self.label = qt.QLabel (self.q)
self.slider = qt.QSlider(min, max, 1, 0, qt.QSlider.Horizontal, self.q)
super(Qtopia_RangeField, self).__init__(gui, master, o, attr, undo_stack, min, max, incr)
self.slider.connect(self.slider, qt.SIGNAL("valueChanged(int)"), self.validate)
def validate(self, v):
self.set_value(v)
self.label.setText(str(v))
def update(self):
self.updating = 1
try:
v = self.get_value()
self.slider.setValue(v)
self.label.setText(str(v))
finally: self.updating = 0
class Qtopia_ShortEnumField(QtopiaField, _ShortEnumField):
def __init__(self, gui, master, o, attr, undo_stack, choices, value_2_enum = None, enum_2_value = None):
self.q = qt.QComboBox(master.q)
super(Qtopia_ShortEnumField, self).__init__(gui, master, o, attr, undo_stack, choices, value_2_enum, enum_2_value)
for choice in self.choice_keys: self.q.insertItem(choice)
self.update()
self.q.connect(self.q, qt.SIGNAL("activated(int)"), self.validate)
def validate(self, enum):
i = self.q.currentItem()
self.set_value(self.choices[self.choice_keys[i]])
def update(self):
self.updating = 1
try:
i = self.choice_2_index.get(self.get_value())
if not i is None: self.q.setCurrentItem(i)
else: self.q.setCurrentItem(-1)
finally: self.updating = 0
class Qtopia_LongEnumField(QtopiaField, _LongEnumField):
y_flags = 1
def __init__(self, gui, master, o, attr, undo_stack, choices, value_2_enum = None, enum_2_value = None):
self.q = qt.QListBox(master.q)
super(Qtopia_LongEnumField, self).__init__(gui, master, o, attr, undo_stack, choices, value_2_enum, enum_2_value)
for choice in self.choice_keys: self.q.insertItem(choice)
self.update()
self.q.connect(self.q, qt.SIGNAL("selectionChanged()"), self.validate)
def validate(self):
i = self.q.currentItem()
if i != self.i:
self.i = i
enum = self.choices[self.choice_keys[i]]
self.set_value(enum)
def update(self):
self.updating = 1
try:
self.q.clearSelection()
self.i = self.choice_2_index.get(self.get_value())
if not self.i is None:
self.q.setSelected(self.i, 1)
self.q.ensureCurrentVisible()
finally: self.updating = 0
| gpl-2.0 | 1,841,811,769,113,129,200 | 34.980545 | 141 | 0.667676 | false | 3.168951 | false | false | false |
codeforfrankfurt/PolBotCheck | polbotcheck/word_cluster.py | 1 | 3812 | import json
from sklearn.feature_extraction.text import TfidfVectorizer
import nltk
from nltk.corpus import stopwords
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import db
import os
DATASET_PATH = os.environ['HOME'] + '/nltk_data/corpora/twitter_samples/tweets.20150430-223406.json'
def calc_frequencies(words, words_n=50, lang='german'):
words = [word for word in words if len(word) > 1]
words = [word for word in words if not word.isnumeric()]
words = [word.lower() for word in words]
# words = [word for word in words if word not in all_stopwords]
# Stemming words seems to make matters worse, disabled
# stemmer = nltk.stem.snowball.SnowballStemmer(lang)
# words = [stemmer.stem(word) for word in words]
fdist = nltk.FreqDist(words)
return fdist.most_common(words_n)
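# For illustration: calc_frequencies(['Bot', 'bot', 'check'], words_n=2) returns [('bot', 2), ('check', 1)].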
def get_word_clouds(tweets, users, words_n=50, lang='english'):
default_stopwords = set(nltk.corpus.stopwords.words(lang))
stopwords_file = '../data/stopwords.txt'
custom_stopwords = set(open(stopwords_file, 'r').read().splitlines())
all_stopwords = default_stopwords | custom_stopwords
vectorizer = TfidfVectorizer(max_df=0.5, min_df=2, stop_words=list(all_stopwords))
X = vectorizer.fit_transform(tweets)
terms = vectorizer.get_feature_names()
word_cloud_per_person = {}
for doc in range(len(tweets)):
feature_index = X[doc, :].nonzero()[1]
tfidf_scores = zip(feature_index, [X[doc, x] for x in feature_index])
doc_terms = []
for word, score in [(terms[i], score) for (i, score) in tfidf_scores]:
doc_terms.append((word, score))
important_terms = [(word, score) for word, score in sorted(doc_terms, key=lambda x: x[1], reverse=True)][:words_n]
word_cloud_per_person[users[doc]] = important_terms
return word_cloud_per_person
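# The result maps each user name to a list of up to words_n (term, tf-idf score) pairs, highest-scoring first.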
def save_wordcloud_image(frequencies, filename):
wordcloud = WordCloud(width=1024, height=786, min_font_size=1).fit_words(frequencies)
fig = plt.figure()
fig.set_figwidth(12)
fig.set_figheight(16)
plt.imshow(wordcloud)
plt.axis("off")
plt.savefig(filename, facecolor='k', bbox_inches='tight')
print('image created')
def load_example_data():
tweets = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['text'])
return tweets
def get_corpus_of_most_active_users(n_users=5):
tweets = []
texts = []
with open(DATASET_PATH) as f:
for line in f:
tweets.append(json.loads(line)['user']['screen_name'])
texts.append((json.loads(line)['user']['screen_name'], json.loads(line)['text']))
users = nltk.FreqDist(tweets).most_common(n_users)
dict = {}
for user, tweet in texts:
if user in dict:
dict[user] = " ".join([dict[user],tweet])
else:
dict[user] = tweet
corpus = [dict[name] for name, _ in users]
user_names = [name for name, _ in users]
return corpus, user_names
if __name__ == "__main__":
corpus, users = get_corpus_of_most_active_users()
word_cloud_per_person = get_word_clouds(corpus, users, words_n=100, lang='english')
for user in users:
topic_frequencies = word_cloud_per_person[user]
print(user)
print(topic_frequencies)
db.save_word_frequencies('test_user_seb', dict(topic_frequencies))
exit()
# save_wordcloud_image(dict(topic_frequencies), 'plots/word_clouds/' + user + '.png')
# This is an example how to save a word_cloud in the database
# user_in_db = 'malechanissen'
# db.save_word_frequencies(user_in_db, {'w3':10, 'w4':20})
# db.save_word_frequencies(user_in_db, dict(topic_frequencies))
# db.save_word_frequencies('test_user_seb', {'w3':10, 'w4':20})
| mit | -4,804,049,777,207,499,000 | 37.505051 | 122 | 0.651626 | false | 3.230508 | false | false | false |
trentspi/PX8 | examples/plasma/plasma.py | 1 | 17516 | px8 / python cartridge
version 1
__python__
# Original code from rez
# https://www.lexaloffle.com/bbs/?tid=29529
SIZE = 128
A = None
cr = None
cg = None
cb = None
cw = None
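# _init switches to a 128x128 screen and caches four colour ramps (cr, cg, cb, cw) from the first
# four rows of the sprite sheet via sget(); _draw then indexes these ramps to colour the plasma.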
def _init():
global SIZE, A, cr, cg, cb, cw
mode(SIZE, SIZE, 1)
cls()
A = SIZE - 1
cr = [0] * SIZE
cg = [0] * SIZE
cb = [0] * SIZE
cw = [0] * SIZE
for i in range(0, SIZE):
cr[i]=sget(i,0)
cg[i]=sget(i,1)
cb[i]=sget(i,2)
cw[i]=sget(i,3)
def _update():
pass
def _draw():
global A, cr, cg, cb, cw
a2 = A * 2
for x in range(3, SIZE, 3):
x2=x/2048
for y in range(3, SIZE, 3):
y2=y/1024
v1,v2=256+192*sin(y2+a2),sin(A-x2+y2)
r,g,b=56*cos(a2+x/v1+v2),48*sin((x+y)/v1*v2),40*cos((x*v2-y)/v1)
pset(x,y,cr[flr(56+r)])
pset(x+1,y,cg[flr(48-g)])
pset(x,y+1,cb[flr(40+b)])
pset(x+1,y+1,cw[flr(24-r+g)])
A+=0.0025
if A>1:
A=0
__gfx__
00000020202222424244448484888898989999a9a9aaaa7a7a7a9842000000002022424484889899a9aa7a7a7aa9a99898848442422020000002489a7a984200
0000001010111151515555d5d5ddddcdcdcccc6c6c6666767676cd510000000010115155d5ddcdcc6c667676766c6ccdcdd5d5515110100000015dc676cd5100
000000202022222252555535353333b3b3bbbb6b6b6666767676b35200000000202252553533b3bb6b667676766b6bb3b335355252202000000253b676b35200
0000000000000050505555d5d5ddddededeeeefefeffff7f7f7fed500000000000005055d5ddedeefeff7f7f7ffefeededd5d5505000000000005def7fed5000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
| mit | -8,687,394,390,623,362,000 | 94.715847 | 128 | 0.96232 | false | 13.557276 | false | false | false |
memsharded/conan | conans/client/graph/graph_builder.py | 1 | 18724 | import time
from collections import OrderedDict
from conans.client.graph.graph import DepsGraph, Node, RECIPE_EDITABLE
from conans.errors import (ConanException, ConanExceptionInUserConanfileMethod,
conanfile_exception_formatter)
from conans.model.conan_file import get_env_context_manager
from conans.model.ref import ConanFileReference
from conans.model.requires import Requirements, Requirement
from conans.util.log import logger
class DepsGraphBuilder(object):
""" Responsible for computing the dependencies graph DepsGraph
"""
def __init__(self, proxy, output, loader, resolver, recorder):
self._proxy = proxy
self._output = output
self._loader = loader
self._resolver = resolver
self._recorder = recorder
def load_graph(self, root_node, check_updates, update, remotes, processed_profile):
check_updates = check_updates or update
dep_graph = DepsGraph()
# compute the conanfile entry point for this dependency graph
name = root_node.name
root_node.public_closure = OrderedDict([(name, root_node)])
root_node.public_deps = {name: root_node}
root_node.ancestors = set()
dep_graph.add_node(root_node)
# enter recursive computation
t1 = time.time()
self._load_deps(dep_graph, root_node, Requirements(), None, None,
check_updates, update, remotes,
processed_profile)
logger.debug("GRAPH: Time to load deps %s" % (time.time() - t1))
return dep_graph
def extend_build_requires(self, graph, node, build_requires_refs, check_updates, update,
remotes, processed_profile):
# The options that will be defined in the node will be the real options values that have
# been already propagated downstream from the dependency graph. This will override any
# other possible option in the build_requires dependency graph. This means that in theory
# an option conflict while expanding the build_requires is impossible
node.conanfile.build_requires_options.clear_unscoped_options()
new_options = node.conanfile.build_requires_options._reqs_options
new_reqs = Requirements()
conanfile = node.conanfile
scope = conanfile.display_name
requires = [Requirement(ref) for ref in build_requires_refs]
self._resolve_ranges(graph, requires, scope, update, remotes)
for require in requires:
name = require.ref.name
require.build_require = True
self._handle_require(name, node, require, graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options)
new_nodes = set(n for n in graph.nodes if n.package_id is None)
# This is to make sure that build_requires have precedence over the normal requires
ordered_closure = list(node.public_closure.items())
ordered_closure.sort(key=lambda x: x[1] not in new_nodes)
node.public_closure = OrderedDict(ordered_closure)
subgraph = DepsGraph()
subgraph.aliased = graph.aliased
subgraph.evaluated = graph.evaluated
subgraph.nodes = new_nodes
for n in subgraph.nodes:
n.build_require = True
return subgraph
def _resolve_ranges(self, graph, requires, consumer, update, remotes):
for require in requires:
self._resolver.resolve(require, consumer, update, remotes)
# if the range is resolved, check if it is an alias
alias = graph.aliased.get(require.ref)
if alias:
require.ref = alias
def _resolve_deps(self, graph, node, update, remote_name):
# Resolve possible version ranges of the current node requirements
# new_reqs is a shallow copy of what is propagated upstream, so changes done by the
# RangeResolver are also done in new_reqs, and then propagated!
conanfile = node.conanfile
scope = conanfile.display_name
self._resolve_ranges(graph, conanfile.requires.values(), scope, update, remote_name)
if not hasattr(conanfile, "_conan_evaluated_requires"):
conanfile._conan_evaluated_requires = conanfile.requires.copy()
elif conanfile.requires != conanfile._conan_evaluated_requires:
raise ConanException("%s: Incompatible requirements obtained in different "
"evaluations of 'requirements'\n"
" Previous requirements: %s\n"
" New requirements: %s"
% (scope, list(conanfile._conan_evaluated_requires.values()),
list(conanfile.requires.values())))
def _load_deps(self, dep_graph, node, down_reqs, down_ref, down_options,
check_updates, update, remotes, processed_profile):
""" expands the dependencies of the node, recursively
param node: Node object to be expanded in this step
down_reqs: the Requirements as coming from downstream, which can overwrite current
values
param down_ref: ConanFileReference of who is depending on current node for this expansion
"""
# basic node configuration: calling configure() and requirements()
new_reqs, new_options = self._config_node(dep_graph, node, down_reqs, down_ref, down_options)
# if there are version-ranges, resolve them before expanding each of the requirements
self._resolve_deps(dep_graph, node, update, remotes)
# Expand each one of the current requirements
for name, require in node.conanfile.requires.items():
if require.override:
continue
self._handle_require(name, node, require, dep_graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options)
def _handle_require(self, name, node, require, dep_graph, check_updates, update,
remotes, processed_profile, new_reqs, new_options):
# Handle a requirement of a node. There are 2 possibilities
# node -(require)-> new_node (creates a new node in the graph)
# node -(require)-> previous (creates a diamond with a previously existing node)
# If the required is found in the node ancestors a loop is being closed
# TODO: allow bootstrapping, use references instead of names
if name in node.ancestors or name == node.name:
raise ConanException("Loop detected: '%s' requires '%s' which is an ancestor too"
% (node.ref, require.ref))
# If the requirement is found in the node public dependencies, it is a diamond
previous = node.public_deps.get(name)
previous_closure = node.public_closure.get(name)
# build_requires and private will create a new node if it is not in the current closure
if not previous or ((require.build_require or require.private) and not previous_closure):
# new node, must be added and expanded (node -> new_node)
new_node = self._create_new_node(node, dep_graph, require, name, check_updates, update,
remotes, processed_profile)
# The closure of a new node starts with just itself
new_node.public_closure = OrderedDict([(new_node.ref.name, new_node)])
# The new created node is connected to the parent one
node.connect_closure(new_node)
if require.private or require.build_require:
# If the requirement is private (or build_require), a new public_deps is defined
# the new_node doesn't propagate downstream the "node" consumer, so its public_deps
# will be a copy of the node.public_closure, i.e. it can only cause conflicts in the
# new_node.public_closure.
new_node.public_deps = node.public_closure.copy()
new_node.public_deps[name] = new_node
else:
# Normal requires propagate and can conflict with the parent "node.public_deps" too
new_node.public_deps = node.public_deps.copy()
new_node.public_deps[name] = new_node
# All the dependents of "node" are also connected now to "new_node"
for dep_node in node.inverse_closure:
dep_node.connect_closure(new_node)
# RECURSION, keep expanding (depth-first) the new node
self._load_deps(dep_graph, new_node, new_reqs, node.ref, new_options, check_updates,
update, remotes, processed_profile)
else: # a public node already exist with this name
# This is closing a diamond, the node already exists and is reachable
alias_ref = dep_graph.aliased.get(require.ref)
# Necessary to make sure that it is pointing to the correct aliased
if alias_ref:
require.ref = alias_ref
# As we are closing a diamond, there can be conflicts. This will raise if conflicts
self._conflicting_references(previous.ref, require.ref, node.ref)
# Add current ancestors to the previous node and upstream deps
union = node.ancestors.union([node.name])
for n in previous.public_closure.values():
n.ancestors.update(union)
# Even if it was in private scope, if it is reached via a public require
# the previous node and its upstream becomes public
if previous.private and not require.private:
previous.make_public()
node.connect_closure(previous)
dep_graph.add_edge(node, previous, require.private, require.build_require)
# All the upstream dependencies (public_closure) of the previously existing node
# now will be also connected to the node and to all its dependants
for name, n in previous.public_closure.items():
if n.build_require or n.private:
continue
node.connect_closure(n)
for dep_node in node.inverse_closure:
dep_node.connect_closure(n)
# Recursion is only necessary if the inputs conflict with the current "previous"
# configuration of upstream versions and options
if self._recurse(previous.public_closure, new_reqs, new_options):
self._load_deps(dep_graph, previous, new_reqs, node.ref, new_options, check_updates,
update, remotes, processed_profile)
@staticmethod
def _conflicting_references(previous_ref, new_ref, consumer_ref=None):
if previous_ref.copy_clear_rev() != new_ref.copy_clear_rev():
if consumer_ref:
raise ConanException("Conflict in %s\n"
" Requirement %s conflicts with already defined %s\n"
" To change it, override it in your base requirements"
% (consumer_ref, new_ref, previous_ref))
return True
# Computed node, if is Editable, has revision=None
# If new_ref.revision is None we cannot assume any conflict, the user hasn't specified
# a revision, so it's ok any previous_ref
if previous_ref.revision and new_ref.revision and previous_ref.revision != new_ref.revision:
if consumer_ref:
raise ConanException("Conflict in %s\n"
" Different revisions of %s has been requested"
% (consumer_ref, new_ref))
return True
return False
def _recurse(self, closure, new_reqs, new_options):
""" For a given closure, if some requirements or options coming from downstream
is incompatible with the current closure, then it is necessary to recurse
then, incompatibilities will be raised as usually"""
for req in new_reqs.values():
n = closure.get(req.ref.name)
if n and self._conflicting_references(n.ref, req.ref):
return True
for pkg_name, options_values in new_options.items():
n = closure.get(pkg_name)
if n:
options = n.conanfile.options
for option, value in options_values.items():
if getattr(options, option) != value:
return True
return False
def _config_node(self, graph, node, down_reqs, down_ref, down_options):
""" update settings and option in the current ConanFile, computing actual
requirement values, cause they can be overridden by downstream requires
param settings: dict of settings values => {"os": "windows"}
"""
try:
conanfile, ref = node.conanfile, node.ref
# Avoid extra time manipulating the sys.path for python
with get_env_context_manager(conanfile, without_python=True):
if hasattr(conanfile, "config"):
if not ref:
conanfile.output.warn("config() has been deprecated."
" Use config_options and configure")
with conanfile_exception_formatter(str(conanfile), "config"):
conanfile.config()
with conanfile_exception_formatter(str(conanfile), "config_options"):
conanfile.config_options()
conanfile.options.propagate_upstream(down_options, down_ref, ref)
if hasattr(conanfile, "config"):
with conanfile_exception_formatter(str(conanfile), "config"):
conanfile.config()
with conanfile_exception_formatter(str(conanfile), "configure"):
conanfile.configure()
conanfile.settings.validate() # All has to be ok!
conanfile.options.validate()
# Update requirements (overwrites), computing new upstream
if hasattr(conanfile, "requirements"):
# If re-evaluating the recipe, in a diamond graph, with different options,
# it could happen that one execution path of requirements() defines a package
# and another one a different package raising Duplicate dependency error
# Or the two consecutive calls, adding 2 different dependencies for the two paths
# So it is necessary to save the "requires" state and restore it before a second
# execution of requirements(). It is a shallow copy, if first iteration is
# RequireResolve'd or overridden, the inner requirements are modified
if not hasattr(conanfile, "_conan_original_requires"):
conanfile._conan_original_requires = conanfile.requires.copy()
else:
conanfile.requires = conanfile._conan_original_requires.copy()
with conanfile_exception_formatter(str(conanfile), "requirements"):
conanfile.requirements()
new_options = conanfile.options.deps_package_values
if graph.aliased:
for req in conanfile.requires.values():
req.ref = graph.aliased.get(req.ref, req.ref)
new_down_reqs = conanfile.requires.update(down_reqs, self._output, ref, down_ref)
except ConanExceptionInUserConanfileMethod:
raise
except ConanException as e:
raise ConanException("%s: %s" % (ref or "Conanfile", str(e)))
except Exception as e:
raise ConanException(e)
return new_down_reqs, new_options
def _create_new_node(self, current_node, dep_graph, requirement, name_req,
check_updates, update, remotes, processed_profile, alias_ref=None):
""" creates and adds a new node to the dependency graph
"""
try:
result = self._proxy.get_recipe(requirement.ref, check_updates, update,
remotes, self._recorder)
except ConanException as e:
if current_node.ref:
self._output.error("Failed requirement '%s' from '%s'"
% (requirement.ref,
current_node.conanfile.display_name))
raise e
conanfile_path, recipe_status, remote, new_ref = result
dep_conanfile = self._loader.load_conanfile(conanfile_path, processed_profile,
ref=requirement.ref)
if recipe_status == RECIPE_EDITABLE:
dep_conanfile.in_local_cache = False
dep_conanfile.develop = True
if getattr(dep_conanfile, "alias", None):
alias_ref = alias_ref or new_ref.copy_clear_rev()
requirement.ref = ConanFileReference.loads(dep_conanfile.alias)
dep_graph.aliased[alias_ref] = requirement.ref
return self._create_new_node(current_node, dep_graph, requirement,
name_req, check_updates, update,
remotes, processed_profile,
alias_ref=alias_ref)
logger.debug("GRAPH: new_node: %s" % str(new_ref))
new_node = Node(new_ref, dep_conanfile)
new_node.revision_pinned = requirement.ref.revision is not None
new_node.recipe = recipe_status
new_node.remote = remote
# Ancestors are a copy of the parent, plus the parent itself
new_node.ancestors = current_node.ancestors.copy()
new_node.ancestors.add(current_node.name)
# build-requires and private affect transitively. If "node" is already
# a build_require or a private one, its requirements will inherit that property
# Or if the require specify that property, then it will get it too
new_node.build_require = current_node.build_require or requirement.build_require
new_node.private = current_node.private or requirement.private
dep_graph.add_node(new_node)
dep_graph.add_edge(current_node, new_node, requirement.private, requirement.build_require)
return new_node
| mit | 1,260,864,511,600,322,000 | 52.497143 | 101 | 0.605319 | false | 4.49988 | true | false | false |
ianrenton/TelegraphFantasyFootballTeamPicker | telegraphpicker.py | 1 | 19193 | #!/usr/bin/python
# -*- coding: cp1252 -*-
# Telegraph Fantasy Football Team Picker
# version 1.2.1 (11 March 2011)
# by Ian Renton and Mark Harris
# For details, see http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker
# This code is released under the GPLv3 licence (http://www.gnu.org/licenses/gpl.html).
# Takes player data from the TFF website, and picks the optimum team based
# on players' past performance and current injuries.
import re
import datetime
print "Content-Type: text/html\n\n"
# Port of MATLAB's nchoosek (unique combination) function.
def nchoosek(items, n):
if n==0: yield []
else:
for (i, item) in enumerate(items):
for cc in nchoosek(items[i+1:],n-1):
yield [item]+cc
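# For example, list(nchoosek([1, 2, 3], 2)) yields [[1, 2], [1, 3], [2, 3]].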
# Works out the position a given player number maps to.
def calculatePosition(number):
if ((number < 2000) & (number >= 1000)):
return "Goalkeeper"
elif ((number < 3000) & (number >= 2000)):
return "Defender"
elif ((number < 4000) & (number >= 3000)):
return "Midfielder"
elif ((number < 5000) & (number >= 4000)):
return "Striker"
def cutDownPlayerPointsHTML(html):
goalkeepersStart = re.compile("<div class='pla-list' id='list-GK'><table>").search(html)
goalkeepersEnd = re.compile("</table>").search(html[goalkeepersStart.start():len(html)])
goalkeepersText = html[goalkeepersStart.start():goalkeepersStart.start()+goalkeepersEnd.end()]
defendersStart = re.compile("<div class='pla-list' id='list-DEF'><table>").search(html)
defendersEnd = re.compile("</table>").search(html[defendersStart.start():len(html)])
defendersText = html[defendersStart.start():defendersStart.start()+defendersEnd.end()]
midfieldersStart = re.compile("<div class='pla-list' id='list-MID'><table>").search(html)
midfieldersEnd = re.compile("</table>").search(html[midfieldersStart.start():len(html)])
midfieldersText = html[midfieldersStart.start():midfieldersStart.start()+midfieldersEnd.end()]
strikersStart = re.compile("<div class='pla-list' id='list-STR'><table>").search(html)
strikersEnd = re.compile("</table>").search(html[strikersStart.start():len(html)])
strikersText = html[strikersStart.start():strikersStart.start()+strikersEnd.end()]
return goalkeepersText + defendersText + midfieldersText + strikersText
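# extractFields walks the concatenated tables and returns a flat list of fields per player row:
# the numeric id parsed from the id='p...' attribute, followed by the text of each <td> cell.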
def extractFields(text):
textIndex = 0
arrayIndex = 0
interestingThings = []
while textIndex < len(text):
try:
# Extract data between <tr> and </tr>. This will get an individual player's line.
startPos = re.compile("<tr\s?[^>]*>").search(text[textIndex:len(text)])
endPos = re.compile("</tr>").search(text[textIndex+startPos.end():textIndex+startPos.start()+1000])
thisItem = text[textIndex+startPos.start():textIndex+startPos.end()+endPos.end()]
# Extract the ID field
idStartPos = re.compile("id=\'p").search(thisItem)
idEndPos = re.compile("\'").search(thisItem[idStartPos.end():len(thisItem)])
interestingThings.append(thisItem[idStartPos.end():idStartPos.end()+idEndPos.end()-1])
innerIndex = 0
while innerIndex < len(thisItem):
try:
# Extract data between <td> and </td>. This will get the individual cells.
innerStartPos = re.compile("<td>").search(thisItem[innerIndex:len(thisItem)])
innerEndPos = re.compile("</td>").search(thisItem[innerIndex+innerStartPos.end():len(thisItem)])
innerItem = thisItem[innerIndex+innerStartPos.end():innerIndex+innerStartPos.end()+innerEndPos.start()]
innerIndex = innerIndex + innerStartPos.end() + innerEndPos.end()
interestingThings.append(innerItem)
arrayIndex += 1
except:
break
textIndex = textIndex+startPos.end()+endPos.end()
except:
break
return interestingThings
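# Each Player is built from one row of parsed fields: [number, name, team, points, price].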
class Player:
def __init__(self, row):
self.number = int(row[0])
self.name = row[1]
self.team = row[2]
self.points = int(row[3])
self.price = round(float(row[4]), 1)
self.value = self.points / self.price
self.position = calculatePosition(self.number)
def __str__(self):
return '<tr><td><p>%4s</p></td><td><p>%-20s</p></td><td><p>%-20s</p></td><td><p>%4s</p></td><td><p>%4s</p></td></tr>' % (self.number, self.name, self.team, self.price, self.points)
class TeamPicker:
def __init__(self):
self.process()
def set_initial_text(self):
# Print header
introText = "<h2>Optimum Telegraph Fantasy Football Team</h2><p style=\"font-weight:bold\">Generated on " + datetime.datetime.now().strftime("%A %d %B %Y at %H:%M:%S.") + "</p>"
introText = introText + "<p>Created using Telegraph Fantasy Football Team Picker, version 1.2.1 (11 March 2011), by Ian Renton and Mark Harris.<br>"
introText = introText + "For details and source code, see <a href=\"http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker\">http://www.onlydreaming.net/software/telegraph-fantasy-football-team-picker</a></p>"
self.displayUpdate(introText)
def displayUpdate(self, line):
self.f.write(line)
def process(self):
import urllib2
import re
from collections import defaultdict
try:
urllib2.urlopen('http://www.google.com')
except urllib2.URLError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Internet connection failed.</p>')
internetConnectionAvailable = False
else:
internetConnectionAvailable = True
if internetConnectionAvailable == True:
# Download the HTML file, and create a 'tmpData' list to contain the information.
try:
response = urllib2.urlopen('http://fantasyfootball.telegraph.co.uk/select-team/')
html = response.read()
except IOError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Could not find the player list, maybe the URL has changed?</p>')
return
else:
pass
else:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Using a local mirror of the player list.</p>')
# Load the HTML file, and create a 'tmpData' list to contain the information.
try:
tmpFile = open("export.html","r")
html = tmpFile.read()
tmpFile.close()
except IOError, e:
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<p style="font-weight:bold">Cannot continue.</p>')
return
else:
pass
# Process the HTML into Players
fields = extractFields(cutDownPlayerPointsHTML(html))
tmpData = []
for i in range(len(fields)/7):
# If Points field is blank, replace it with a zero.
if (fields[i*7+5] == ""):
fields[i*7+5] = 0
# Add player (ID, name, club, points, price)
tmpData.append(Player([fields[i*7],fields[i*7+1],fields[i*7+2],fields[i*7+5],fields[i*7+3]]))
# Extra features if we have a net connection
if internetConnectionAvailable == True:
# Fetch injury list from PhysioRoom
response = urllib2.urlopen('http://www.physioroom.com/news/english_premier_league/epl_injury_table.php')
injuryList = response.read()
# Remove injured players
tmpData = filter(lambda player : re.compile(player.name).search(injuryList)==None, tmpData)
# Fetch transfer password from RichardSweeting.org
response = urllib2.urlopen('http://www.richardsweeting.org/pages/telegraph.html')
passwordPage = response.read()
# Find Wednesday's date and the password.
try:
match = re.compile("<p style=\"padding-top: 0pt; \" class=\"paragraph_style_1\">[^\n]*\n").search(passwordPage)
match2 = re.compile("[^<]*<").search(passwordPage[match.start()+56:match.end()])
wednesday = passwordPage[match.start()+56:match.start()+56+match2.end()-1]
except:
wednesday = "???"
try:
match = re.compile("\*\*\* [A-Za-z]* \*\*\*").search(passwordPage)
password = passwordPage[match.start()+4:match.end()-4]
except:
password = "Unknown (Could not parse page, visit <a href=\"http://www.richardsweeting.org/pages/telegraph.html\">http://www.richardsweeting.org/pages/telegraph.html</a> to check manually.)"
transferPasswordInfo = "<p>Transfer password for %s: %s</p>" % (wednesday, password)
else:
pass
# Split data into four separate lists, one for each kind of player.
players = defaultdict(list)
for player in tmpData:
players[player.position].append(player)
# Produce a set of thresholds for VFM and overall price. This allows us to cut
# down the list of players to only those that are good value for money or
# particularly high-scoring. This mirrors human behaviour, where the user
# picks some very high-scoring (but expensive) players, then fills out the rest
# of the team with cheap but good-value players.
# These thresholds are necessary to reduce the number of players being considered,
# as otherwise the number of combinations that the script must consider would be
# too large for the script to run in sensible time.
thresholdDivisor = 1.6
sensibleDataSet = 0
while (sensibleDataSet == 0):
points = lambda player: player.points
valueForMoney = lambda player: player.value
pointThresholds = defaultdict(float)
valueThresholds = defaultdict(float)
for position in players.keys():
pointThresholds[position] = max(players[position], key=points).points / thresholdDivisor
valueThresholds[position] = max(players[position], key=valueForMoney).value / thresholdDivisor
# This section applies the thresholds calculated in the previous one, to cut down
# the number of players.
for position in players.keys():
players[position] = filter(lambda x : ((x.points > pointThresholds[position]) | (x.value > valueThresholds[position])), players[position])
# Using a function to pick unique combinations of players, we here form a list of
# all possible combinations: 1 2 3 4, 1 2 3 5, 1 2 3 6 and so on. Because there
# are multiple formations to choose from, we have to do this several times.
defenderChoices3 = list(nchoosek(players["Defender"],3))
defenderChoices4 = list(nchoosek(players["Defender"],4))
# Now the same for the midfielders.
midfielderChoices3 = list(nchoosek(players["Midfielder"],3))
midfielderChoices4 = list(nchoosek(players["Midfielder"],4))
midfielderChoices5 = list(nchoosek(players["Midfielder"],5))
# And now the same for the strikers.
strikerChoices1 = list(nchoosek(players["Striker"],1))
strikerChoices2 = list(nchoosek(players["Striker"],2))
strikerChoices3 = list(nchoosek(players["Striker"],3))
# If there are too many iterations to run in sensible time, go back and reduce
# thresholdDivisor until we have something sensible. Assume the 442 formation is pretty representative.
totalIterations = len(defenderChoices4) * len(midfielderChoices4) * len(strikerChoices2)
print thresholdDivisor
print totalIterations
if (totalIterations <= 3000000):
sensibleDataSet = 1
else:
n = 0.1
if (thresholdDivisor < 2.8):
n = 0.05
if (thresholdDivisor < 1.8):
n = 0.05
if (thresholdDivisor < 1.3):
n = 0.025
thresholdDivisor = thresholdDivisor - n
# To reduce the number of combinations, we just pick the one goalkeeper
# who provides best value for money rather than searching through them all.
players["Goalkeeper"].sort(lambda x, y: cmp(y.value, x.value))
goalkeeper = players["Goalkeeper"][0]
# For each combination of defenders, we calculate their combined price
# and combined points totals.
# Create two functions that, given a list of permutations of players, will return a list of prices of those players in the same order.
# Er... I guess if you're not up on your functional programming, this must look a bit hideous...
prices = lambda permutations: reduce(lambda total, player: total + player.price, permutations, 0)
points = lambda permutations: reduce(lambda total, player: total + player.points, permutations, 0)
#Sorry! Having those simplifies the next bit dramatically though:
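        # e.g. prices([p1, p2, p3]) == p1.price + p2.price + p3.price, and likewise for points.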
defChoicesPrice3 = map(prices, defenderChoices3)
defChoicesPoints3 = map(points, defenderChoices3)
defChoicesPrice4 = map(prices, defenderChoices4)
defChoicesPoints4 = map(points, defenderChoices4)
# Same for the midfielders.
midChoicesPrice3 = map(prices, midfielderChoices3)
midChoicesPoints3 = map(points, midfielderChoices3)
midChoicesPrice4 = map(prices, midfielderChoices4)
midChoicesPoints4 = map(points, midfielderChoices4)
midChoicesPrice5 = map(prices, midfielderChoices5)
midChoicesPoints5 = map(points, midfielderChoices5)
# Same for the strikers.
strChoicesPrice1 = map(prices, strikerChoices1)
strChoicesPoints1 = map(points, strikerChoices1)
strChoicesPrice2 = map(prices, strikerChoices2)
strChoicesPoints2 = map(points, strikerChoices2)
strChoicesPrice3 = map(prices, strikerChoices3)
strChoicesPoints3 = map(points, strikerChoices3)
# Now we iterate through all possible choices for defenders, midfielders and
# strikers. In each case, we check to see if this set is better than the one
# before, and if so we record it. First, the 442 team.
bestTotalPoints = 0
bestChoices = []
bestFormation = 0
maxPrice = 50 - goalkeeper.price
# 442
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices4):
for (k, strs) in enumerate(strikerChoices2):
if ((defChoicesPrice4[i] + midChoicesPrice4[j] + strChoicesPrice2[k]) <= maxPrice):
teamPoints = (defChoicesPoints4[i] + midChoicesPoints4[j] + strChoicesPoints2[k])
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 433
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices3):
for (k, strs) in enumerate(strikerChoices3):
if ((defChoicesPrice4[i] + midChoicesPrice3[j] + strChoicesPrice3[k]) <= maxPrice):
teamPoints = defChoicesPoints4[i] + midChoicesPoints3[j] + strChoicesPoints3[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 451
for (i, defs) in enumerate(defenderChoices4):
for (j, mids) in enumerate(midfielderChoices5):
for (k, strs) in enumerate(strikerChoices1):
if ((defChoicesPrice4[i] + midChoicesPrice5[j] + strChoicesPrice1[k]) <= maxPrice):
teamPoints = defChoicesPoints4[i] + midChoicesPoints5[j] + strChoicesPoints1[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# 352
for (i, defs) in enumerate(defenderChoices3):
for (j, mids) in enumerate(midfielderChoices5):
for (k, strs) in enumerate(strikerChoices2):
if ((defChoicesPrice3[i] + midChoicesPrice5[j] + strChoicesPrice2[k]) <= maxPrice):
teamPoints = defChoicesPoints3[i] + midChoicesPoints5[j] + strChoicesPoints2[k]
if (teamPoints > bestTotalPoints):
bestTotalPoints = teamPoints
(bestDefs, bestMids, bestStrs) = (defs, mids, strs)
# Calculate optimum team's total price.
bestTotalPrice = goalkeeper.price
for p in bestDefs:
bestTotalPrice += p.price
for p in bestMids:
bestTotalPrice += p.price
for p in bestStrs:
bestTotalPrice += p.price
# Print the optimum team's details.
self.f = open('./output.html', 'w')
self.set_initial_text()
self.displayUpdate('<table width="500px" border="1" cellspacing="2">')
self.displayUpdate('<tr><td><p><b>ID</b></p></td><td><p><b>Name</b></p></td><td><p><b>Club</b></p></td><td><p><b>Price</b></p></td><td><p><b>Points</b></p></td></tr>')
self.displayUpdate('<tr><td colspan=5><p><b>Goalkeeper</b></p></td></tr>')
self.displayUpdate( str(goalkeeper))
self.displayUpdate('<tr><td colspan=5><p><b>Defenders</b></p></td></tr>')
self.displayUpdate( ''.join(map(str, bestDefs)))
self.displayUpdate('<tr><td colspan=5><p><b>Midfielders</b></p></td></tr>')
self.displayUpdate(''.join(map(str, bestMids)))
self.displayUpdate('<tr><td colspan=5><p><b>Strikers</b></p></td></tr>')
self.displayUpdate(''.join(map(str, bestStrs)))
self.displayUpdate('<tr><td colspan=3><p><b>Total</b></p></td><td><p><b>%4s</b></p></td><td><p><b>%4s</b></p></td></tr>' % (bestTotalPrice, bestTotalPoints))
self.displayUpdate('</table>')
self.displayUpdate(transferPasswordInfo)
self.f.close()
print "<p><a href=\"output.html\">output.html</a> successfully generated.</p>"
return 0
teampicker = TeamPicker()
| bsd-2-clause | -450,705,566,761,338,500 | 48.981771 | 238 | 0.602355 | false | 3.808135 | false | false | false |
H-Software/Zabbix-II | zabbix-templates/ibm-storwize-perf/scripts/svc_perf_discovery_sender_zabbix.py | 1 | 4012 | #!/usr/bin/python
# -*- coding: utf-8 -*- # coding: utf-8
#
# IBM Storwize V7000 autodiscovery script for Zabbix
#
# 2013 Matvey Marinin
#
# Sends volume/mdisk/pool LLD JSON data to LLD trapper items "svc.discovery.<volume-mdisk|volume|mdisk|pool>"
# Use with "_Special_Storwize_Perf" Zabbix template
#
# See also http://www.zabbix.com/documentation/2.0/manual/discovery/low_level_discovery
#
# Usage:
# svc_perf_discovery_sender.py [--debug] --clusters <svc1>[,<svc2>...] --user <username> --password <pwd>
#
# --debug = Enable debug output
# --clusters = Comma-separated Storwize node list
# --user = Storwize V7000 user account with Administrator role (it seems that Monitor role is not enough)
# --password = User password
#
import pywbem
import getopt, sys
from zbxsend import Metric, send_to_zabbix
import logging
def usage():
print >> sys.stderr, "Usage: svc_perf_discovery_sender_zabbix.py [--debug] --clusters <svc1>[,<svc2>...] --user <username> --password <pwd> --discovery-types <type1>,[type2]"
print >> sys.stderr, "Discovery types: 'volume-mdisk','volume','mdisk','pool'"
try:
opts, args = getopt.gnu_getopt(sys.argv[1:], "-h", ["help", "clusters=", "user=", "password=", "debug", "discovery-types="])
except getopt.GetoptError, err:
print >> sys.stderr, str(err)
usage()
sys.exit(2)
debug = False
clusters = []
DISCOVERY_TYPES = []
user = None
password = None
for o, a in opts:
if o == "--clusters" and not a.startswith('--'):
clusters.extend( a.split(','))
elif o == "--user" and not a.startswith('--'):
user = a
elif o == "--password" and not a.startswith('--'):
password = a
elif o == "--debug":
debug = True
elif o == "--discovery-types":
DISCOVERY_TYPES.extend( a.split(','))
elif o in ("-h", "--help"):
usage()
sys.exit()
if not clusters:
print >> sys.stderr, '--clusters option must be set'
usage()
sys.exit(2)
if not DISCOVERY_TYPES:
print >> sys.stderr, '--discovery-types option must be set'
usage()
sys.exit(2)
if not user or not password:
print >> sys.stderr, '--user and --password options must be set'
usage()
sys.exit(2)
def debug_print(message):
if debug:
print message
for cluster in clusters:
debug_print('Connecting to: %s' % cluster)
conn = pywbem.WBEMConnection('https://'+cluster, (user, password), 'root/ibm')
conn.debug = True
for discovery in DISCOVERY_TYPES:
output = []
if discovery == 'volume-mdisk' or discovery == 'volume':
for vol in conn.ExecQuery('WQL', 'select DeviceID, ElementName from IBMTSSVC_StorageVolume'):
output.append( '{"{#TYPE}":"%s", "{#NAME}":"%s", "{#ID}":"%s"}' % ('volume', vol.properties['ElementName'].value, vol.properties['DeviceID'].value) )
if discovery == 'volume-mdisk' or discovery == 'mdisk':
for mdisk in conn.ExecQuery('WQL', 'select DeviceID, ElementName from IBMTSSVC_BackendVolume'):
output.append( '{"{#TYPE}":"%s", "{#NAME}":"%s", "{#ID}":"%s"}' % ('mdisk', mdisk.properties['ElementName'].value, mdisk.properties['DeviceID'].value) )
if discovery == 'pool':
for pool in conn.ExecQuery('WQL', 'select PoolID, ElementName from IBMTSSVC_ConcreteStoragePool'):
output.append( '{"{#TYPE}":"%s","{#NAME}":"%s","{#ID}":"%s"}' % ('pool', pool.properties['ElementName'].value, pool.properties['PoolID'].value) )
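    # The payload assembled below has the Zabbix low-level discovery shape, roughly:
    # {"data":[{"{#TYPE}":"volume","{#NAME}":"vol01","{#ID}":"..."}, ...]}  (names here are illustrative)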
json = []
json.append('{"data":[')
for i, v in enumerate( output ):
if i < len(output)-1:
json.append(v+',')
else:
json.append(v)
json.append(']}')
json_string = ''.join(json)
print(json_string)
trapper_key = 'svc.discovery.%s' % discovery
debug_print('Sending to host=%s, key=%s' % (cluster, trapper_key))
#send json to LLD trapper item with zbxsend module
if debug:
logging.basicConfig(level=logging.INFO)
else:
logging.basicConfig(level=logging.WARNING)
send_to_zabbix([Metric(cluster, trapper_key, json_string)], 'localhost', 10051)
debug_print('')
| gpl-2.0 | 5,975,721,742,574,174,000 | 31.617886 | 176 | 0.639083 | false | 3.207034 | false | false | false |
rven/odoo | odoo/fields.py | 1 | 165522 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" High-level objects for fields. """
from collections import defaultdict
from datetime import date, datetime, time
from operator import attrgetter
from xmlrpc.client import MAXINT
import itertools
import logging
import base64
import binascii
import pytz
import psycopg2
from .tools import (
float_repr, float_round, float_compare, float_is_zero, html_sanitize, human_size,
pg_varchar, ustr, OrderedSet, pycompat, sql, date_utils, unique, IterableGenerator,
image_process, merge_sequences,
)
from .tools import DEFAULT_SERVER_DATE_FORMAT as DATE_FORMAT
from .tools import DEFAULT_SERVER_DATETIME_FORMAT as DATETIME_FORMAT
from .tools.translate import html_translate, _
from .tools.mimetypes import guess_mimetype
from odoo.exceptions import CacheMiss
DATE_LENGTH = len(date.today().strftime(DATE_FORMAT))
DATETIME_LENGTH = len(datetime.now().strftime(DATETIME_FORMAT))
# hacky-ish way to prevent access to a field through the ORM (except for sudo mode)
NO_ACCESS='.'
IR_MODELS = (
'ir.model', 'ir.model.data', 'ir.model.fields', 'ir.model.fields.selection',
'ir.model.relation', 'ir.model.constraint', 'ir.module.module',
)
_logger = logging.getLogger(__name__)
_schema = logging.getLogger(__name__[:-7] + '.schema')
Default = object() # default value for __init__() methods
def first(records):
""" Return the first record in ``records``, with the same prefetching. """
return next(iter(records)) if len(records) > 1 else records
def resolve_mro(model, name, predicate):
""" Return the list of successively overridden values of attribute ``name``
in mro order on ``model`` that satisfy ``predicate``. Model classes
(the ones that appear in the registry) are ignored.
"""
result = []
for cls in model._model_classes:
value = cls.__dict__.get(name, Default)
if value is Default:
continue
if not predicate(value):
break
result.append(value)
return result
class MetaField(type):
""" Metaclass for field classes. """
by_type = {}
def __init__(cls, name, bases, attrs):
super(MetaField, cls).__init__(name, bases, attrs)
if not hasattr(cls, 'type'):
return
if cls.type and cls.type not in MetaField.by_type:
MetaField.by_type[cls.type] = cls
# compute class attributes to avoid calling dir() on fields
cls.related_attrs = []
cls.description_attrs = []
for attr in dir(cls):
if attr.startswith('_related_'):
cls.related_attrs.append((attr[9:], attr))
elif attr.startswith('_description_'):
cls.description_attrs.append((attr[13:], attr))
_global_seq = iter(itertools.count())
class Field(MetaField('DummyField', (object,), {})):
"""The field descriptor contains the field definition, and manages accesses
and assignments of the corresponding field on records. The following
    attributes may be provided when instantiating a field:
:param str string: the label of the field seen by users; if not
set, the ORM takes the field name in the class (capitalized).
:param str help: the tooltip of the field seen by users
:param invisible: whether the field is invisible (boolean, by default ``False``)
:param bool readonly: whether the field is readonly (default: ``False``)
        This only has an impact on the UI. Any field assignment in code will work
        (if the field is a stored field or one with an inverse method).
:param bool required: whether the value of the field is required (default: ``False``)
:param bool index: whether the field is indexed in database. Note: no effect
on non-stored and virtual fields. (default: ``False``)
:param default: the default value for the field; this is either a static
value, or a function taking a recordset and returning a value; use
``default=None`` to discard default values for the field
:type default: value or callable
:param dict states: a dictionary mapping state values to lists of UI attribute-value
pairs; possible attributes are: ``readonly``, ``required``, ``invisible``.
.. warning:: Any state-based condition requires the ``state`` field value to be
available on the client-side UI. This is typically done by including it in
the relevant views, possibly made invisible if not relevant for the
end-user.
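        For example (the state value shown is illustrative)::
            states={'done': [('readonly', True)]}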
:param str groups: comma-separated list of group xml ids (string); this
restricts the field access to the users of the given groups only
    :param bool company_dependent: whether the field value is dependent on the current company;
The value isn't stored on the model table. It is registered as `ir.property`.
When the value of the company_dependent field is needed, an `ir.property`
is searched, linked to the current company (and current record if one property
exists).
If the value is changed on the record, it either modifies the existing property
for the current record (if one exists), or creates a new one for the current company
and res_id.
If the value is changed on the company side, it will impact all records on which
the value hasn't been changed.
:param bool copy: whether the field value should be copied when the record
is duplicated (default: ``True`` for normal fields, ``False`` for
``one2many`` and computed fields, including property fields and
related fields)
:param bool store: whether the field is stored in database
(default:``True``, ``False`` for computed fields)
:param str group_operator: aggregate function used by :meth:`~odoo.models.Model.read_group`
when grouping on this field.
Supported aggregate functions are:
* ``array_agg`` : values, including nulls, concatenated into an array
* ``count`` : number of rows
* ``count_distinct`` : number of distinct rows
* ``bool_and`` : true if all values are true, otherwise false
* ``bool_or`` : true if at least one value is true, otherwise false
* ``max`` : maximum value of all values
* ``min`` : minimum value of all values
* ``avg`` : the average (arithmetic mean) of all values
* ``sum`` : sum of all values
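        For instance (the field name is illustrative), ``fields.Float(group_operator='avg')``
        makes :meth:`~odoo.models.Model.read_group` average the values instead of summing them.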
:param str group_expand: function used to expand read_group results when grouping on
the current field.
.. code-block:: python
@api.model
def _read_group_selection_field(self, values, domain, order):
return ['choice1', 'choice2', ...] # available selection choices.
@api.model
def _read_group_many2one_field(self, records, domain, order):
return records + self.search([custom_domain])
.. rubric:: Computed Fields
:param str compute: name of a method that computes the field
.. seealso:: :ref:`Advanced Fields/Compute fields <reference/fields/compute>`
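        A minimal sketch of a computed field (field and method names are illustrative):
        .. code-block:: python
            total = fields.Float(compute='_compute_total')
            @api.depends('amount', 'quantity')
            def _compute_total(self):
                for record in self:
                    record.total = record.amount * record.quantity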
:param bool compute_sudo: whether the field should be recomputed as superuser
to bypass access rights (by default ``True`` for stored fields, ``False``
for non stored fields)
:param str inverse: name of a method that inverses the field (optional)
:param str search: name of a method that implement search on the field (optional)
:param str related: sequence of field names
.. seealso:: :ref:`Advanced fields/Related fields <reference/fields/related>`
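        For example (field names are illustrative)::
            partner_name = fields.Char(related='partner_id.name')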
"""
type = None # type of the field (string)
relational = False # whether the field is a relational one
translate = False # whether the field is translated
column_type = None # database column type (ident, spec)
column_format = '%s' # placeholder for value in queries
column_cast_from = () # column types that may be cast to this
args = None # the parameters given to __init__()
_module = None # the field's module name
_modules = None # modules that define this field
_setup_done = None # the field's setup state: None, 'base' or 'full'
_sequence = None # absolute ordering of the field
automatic = False # whether the field is automatically created ("magic" field)
inherited = False # whether the field is inherited (_inherits)
inherited_field = None # the corresponding inherited field
name = None # name of the field
model_name = None # name of the model of this field
comodel_name = None # name of the model of values (if relational)
store = True # whether the field is stored in database
index = False # whether the field is indexed in database
manual = False # whether the field is a custom field
copy = True # whether the field is copied over by BaseModel.copy()
_depends = None # collection of field dependencies
_depends_context = None # collection of context key dependencies
recursive = False # whether self depends on itself
compute = None # compute(recs) computes field on recs
compute_sudo = False # whether field should be recomputed as superuser
inverse = None # inverse(recs) inverses field on recs
search = None # search(recs, operator, value) searches on self
related = None # sequence of field names, for related fields
company_dependent = False # whether ``self`` is company-dependent (property field)
default = None # default(recs) returns the default value
string = None # field label
help = None # field tooltip
invisible = False # whether the field is invisible
readonly = False # whether the field is readonly
required = False # whether the field is required
states = None # set readonly and required depending on state
groups = None # csv list of group xml ids
change_default = False # whether the field may trigger a "user-onchange"
deprecated = None # whether the field is deprecated
related_field = None # corresponding related field
group_operator = None # operator for aggregating values
group_expand = None # name of method to expand groups in read_group()
prefetch = True # whether the field is prefetched
def __init__(self, string=Default, **kwargs):
kwargs['string'] = string
self._sequence = kwargs['_sequence'] = next(_global_seq)
self.args = {key: val for key, val in kwargs.items() if val is not Default}
def new(self, **kwargs):
""" Return a field of the same type as ``self``, with its own parameters. """
return type(self)(**kwargs)
def __str__(self):
return "%s.%s" % (self.model_name, self.name)
def __repr__(self):
return "%s.%s" % (self.model_name, self.name)
############################################################################
#
# Base field setup: things that do not depend on other models/fields
#
def setup_base(self, model, name):
""" Base setup: things that do not depend on other models/fields. """
if self._setup_done and not self.related:
# optimization for regular fields: keep the base setup
self._setup_done = 'base'
else:
# do the base setup from scratch
self._setup_attrs(model, name)
if not self.related:
self._setup_regular_base(model)
self._setup_done = 'base'
#
# Setup field parameter attributes
#
def _can_setup_from(self, field):
""" Return whether ``self`` can retrieve parameters from ``field``. """
return isinstance(field, type(self))
def _get_attrs(self, model, name):
""" Return the field parameter attributes as a dictionary. """
# determine all inherited field attributes
modules = set()
attrs = {}
if self.args.get('automatic') and resolve_mro(model, name, self._can_setup_from):
# prevent an automatic field from overriding a real field
self.args.clear()
if not (self.args.get('automatic') or self.args.get('manual')):
# magic and custom fields do not inherit from parent classes
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
attrs.update(field.args)
if '_module' in field.args:
modules.add(field.args['_module'])
attrs.update(self.args) # necessary in case self is not in class
attrs['args'] = self.args
attrs['model_name'] = model._name
attrs['name'] = name
attrs['_modules'] = modules
# initialize ``self`` with ``attrs``
if name == 'state':
# by default, `state` fields should be reset on copy
attrs['copy'] = attrs.get('copy', False)
if attrs.get('compute'):
# by default, computed fields are not stored, computed in superuser
# mode if stored, not copied (unless stored and explicitly not
# readonly), and readonly (unless inversible)
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', store)
if not (attrs['store'] and not attrs.get('readonly', True)):
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', not attrs.get('inverse'))
if attrs.get('related'):
# by default, related fields are not stored, computed in superuser
# mode, not copied and readonly
attrs['store'] = store = attrs.get('store', False)
attrs['compute_sudo'] = attrs.get('compute_sudo', attrs.get('related_sudo', True))
attrs['copy'] = attrs.get('copy', False)
attrs['readonly'] = attrs.get('readonly', True)
if attrs.get('company_dependent'):
# by default, company-dependent fields are not stored, not computed
# in superuser mode and not copied
attrs['store'] = False
attrs['compute_sudo'] = attrs.get('compute_sudo', False)
attrs['copy'] = attrs.get('copy', False)
attrs['default'] = attrs.get('default', self._default_company_dependent)
attrs['compute'] = self._compute_company_dependent
if not attrs.get('readonly'):
attrs['inverse'] = self._inverse_company_dependent
attrs['search'] = self._search_company_dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('company',)
if attrs.get('translate'):
# by default, translatable fields are context-dependent
attrs['depends_context'] = attrs.get('depends_context', ()) + ('lang',)
# parameters 'depends' and 'depends_context' are stored in attributes
# '_depends' and '_depends_context', respectively
if 'depends' in attrs:
attrs['_depends'] = tuple(attrs.pop('depends'))
if 'depends_context' in attrs:
attrs['_depends_context'] = tuple(attrs.pop('depends_context'))
return attrs
def _setup_attrs(self, model, name):
""" Initialize the field parameter attributes. """
attrs = self._get_attrs(model, name)
# validate arguments
for key in attrs:
# TODO: improve filter as there are attributes on the class which
# are not valid on the field, probably
if not (hasattr(self, key) or model._valid_field_parameter(self, key)):
_logger.warning(
"Field %s.%s: unknown parameter %r, if this is an actual"
" parameter you may want to override the method"
" _valid_field_parameter on the relevant model in order to"
" allow it",
model._name, name, key
)
self.__dict__.update(attrs)
# prefetch only stored, column, non-manual and non-deprecated fields
if not (self.store and self.column_type) or self.manual or self.deprecated:
self.prefetch = False
if not self.string and not self.related:
# related fields get their string from their parent field
self.string = (
name[:-4] if name.endswith('_ids') else
name[:-3] if name.endswith('_id') else name
).replace('_', ' ').title()
# self.default must be a callable
if self.default is not None:
value = self.default
self.default = value if callable(value) else lambda model: value
############################################################################
#
# Full field setup: everything else, except recomputation triggers
#
def setup_full(self, model):
""" Full setup: everything else, except recomputation triggers. """
if self._setup_done != 'full':
if not self.related:
self._setup_regular_full(model)
else:
self._setup_related_full(model)
self._setup_done = 'full'
#
# Setup of non-related fields
#
def _setup_regular_base(self, model):
""" Setup the attributes of a non-related field. """
pass
def _setup_regular_full(self, model):
""" Determine the dependencies and inverse field(s) of ``self``. """
if self._depends is not None:
# the parameter 'depends' has priority over 'depends' on compute
self.depends = self._depends
self.depends_context = self._depends_context or ()
return
# determine the functions implementing self.compute
if isinstance(self.compute, str):
funcs = resolve_mro(model, self.compute, callable)
elif self.compute:
funcs = [self.compute]
else:
funcs = []
# collect depends and depends_context
depends = []
depends_context = list(self._depends_context or ())
for func in funcs:
deps = getattr(func, '_depends', ())
depends.extend(deps(model) if callable(deps) else deps)
depends_context.extend(getattr(func, '_depends_context', ()))
self.depends = tuple(depends)
self.depends_context = tuple(depends_context)
# display_name may depend on context['lang'] (`test_lp1071710`)
if self.automatic and self.name == 'display_name' and model._rec_name:
if model._fields[model._rec_name].base_field.translate:
if 'lang' not in self.depends_context:
self.depends_context += ('lang',)
#
# Setup of related fields
#
def _setup_related_full(self, model):
""" Setup the attributes of a related field. """
# fix the type of self.related if necessary
if isinstance(self.related, str):
self.related = tuple(self.related.split('.'))
# determine the chain of fields, and make sure they are all set up
model_name = self.model_name
for name in self.related:
field = model.pool[model_name]._fields[name]
if field._setup_done != 'full':
field.setup_full(model.env[model_name])
model_name = field.comodel_name
self.related_field = field
# check type consistency
if self.type != field.type:
raise TypeError("Type of related field %s is inconsistent with %s" % (self, field))
# determine dependencies, compute, inverse, and search
if self._depends is not None:
self.depends = self._depends
else:
self.depends = ('.'.join(self.related),)
self.compute = self._compute_related
if self.inherited or not (self.readonly or field.readonly):
self.inverse = self._inverse_related
if field._description_searchable:
# allow searching on self only if the related field is searchable
self.search = self._search_related
# copy attributes from field to self (string, help, etc.)
for attr, prop in self.related_attrs:
if not getattr(self, attr):
setattr(self, attr, getattr(field, prop))
for attr, value in field.__dict__.items():
if not hasattr(self, attr) and model._valid_field_parameter(self, attr):
setattr(self, attr, value)
# special cases of inherited fields
if self.inherited:
if not self.states:
self.states = field.states
if field.required:
self.required = True
self._modules.update(field._modules)
if self._depends_context is not None:
self.depends_context = self._depends_context
else:
self.depends_context = field.depends_context
def traverse_related(self, record):
""" Traverse the fields of the related field `self` except for the last
one, and return it as a pair `(last_record, last_field)`. """
for name in self.related[:-1]:
record = first(record[name])
return record, self.related_field
def _compute_related(self, records):
""" Compute the related field ``self`` on ``records``. """
#
# Traverse fields one by one for all records, in order to take advantage
# of prefetching for each field access. In order to clarify the impact
# of the algorithm, consider traversing 'foo.bar' for records a1 and a2,
# where 'foo' is already present in cache for a1, a2. Initially, both a1
# and a2 are marked for prefetching. As the commented code below shows,
# traversing all fields one record at a time will fetch 'bar' one record
# at a time.
#
# b1 = a1.foo # mark b1 for prefetching
# v1 = b1.bar # fetch/compute bar for b1
# b2 = a2.foo # mark b2 for prefetching
# v2 = b2.bar # fetch/compute bar for b2
#
# On the other hand, traversing all records one field at a time ensures
# maximal prefetching for each field access.
#
# b1 = a1.foo # mark b1 for prefetching
# b2 = a2.foo # mark b2 for prefetching
# v1 = b1.bar # fetch/compute bar for b1, b2
# v2 = b2.bar # value already in cache
#
# This difference has a major impact on performance, in particular in
# the case where 'bar' is a computed field that takes advantage of batch
# computation.
#
values = list(records)
for name in self.related[:-1]:
try:
values = [first(value[name]) for value in values]
except AccessError as e:
description = records.env['ir.model']._get(records._name).name
raise AccessError(
_("%(previous_message)s\n\nImplicitly accessed through '%(document_kind)s' (%(document_model)s).") % {
'previous_message': e.args[0],
'document_kind': description,
'document_model': records._name,
}
)
# assign final values to records
for record, value in zip(records, values):
record[self.name] = self._process_related(value[self.related_field.name])
def _process_related(self, value):
"""No transformation by default, but allows override."""
return value
def _inverse_related(self, records):
""" Inverse the related field ``self`` on ``records``. """
# store record values, otherwise they may be lost by cache invalidation!
record_value = {record: record[self.name] for record in records}
for record in records:
target, field = self.traverse_related(record)
# update 'target' only if 'record' and 'target' are both real or
# both new (see `test_base_objects.py`, `test_basic`)
if target and bool(target.id) == bool(record.id):
target[field.name] = record_value[record]
def _search_related(self, records, operator, value):
""" Determine the domain to search on field ``self``. """
return [('.'.join(self.related), operator, value)]
# properties used by _setup_related_full() to copy values from related field
_related_comodel_name = property(attrgetter('comodel_name'))
_related_string = property(attrgetter('string'))
_related_help = property(attrgetter('help'))
_related_groups = property(attrgetter('groups'))
_related_group_operator = property(attrgetter('group_operator'))
@property
def base_field(self):
""" Return the base field of an inherited field, or ``self``. """
return self.inherited_field.base_field if self.inherited_field else self
#
# Company-dependent fields
#
def _default_company_dependent(self, model):
return model.env['ir.property']._get(self.name, self.model_name)
def _compute_company_dependent(self, records):
# read property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = Property._get_multi(self.name, self.model_name, records.ids)
for record in records:
record[self.name] = values.get(record.id)
def _inverse_company_dependent(self, records):
# update property as superuser, as the current user may not have access
Property = records.env['ir.property'].sudo()
values = {
record.id: self.convert_to_write(record[self.name], record)
for record in records
}
Property._set_multi(self.name, self.model_name, values)
def _search_company_dependent(self, records, operator, value):
Property = records.env['ir.property'].sudo()
return Property.search_multi(self.name, self.model_name, operator, value)
#
# Setup of field triggers
#
def resolve_depends(self, registry):
""" Return the dependencies of `self` as a collection of field tuples. """
Model0 = registry[self.model_name]
for dotnames in self.depends:
field_seq = []
model_name = self.model_name
for index, fname in enumerate(dotnames.split('.')):
Model = registry[model_name]
if Model0._transient and not Model._transient:
# modifying fields on regular models should not trigger
# recomputations of fields on transient models
break
try:
field = Model._fields[fname]
except KeyError:
msg = "Field %s cannot find dependency %r on model %r."
raise ValueError(msg % (self, fname, model_name))
if field is self and index:
self.recursive = True
field_seq.append(field)
# do not make self trigger itself: for instance, a one2many
# field line_ids with domain [('foo', ...)] will have
# 'line_ids.foo' as a dependency
if not (field is self and not index):
yield tuple(field_seq)
if field.type in ('one2many', 'many2many'):
for inv_field in Model._field_inverses[field]:
yield tuple(field_seq) + (inv_field,)
model_name = field.comodel_name
############################################################################
#
# Field description
#
def get_description(self, env):
""" Return a dictionary that describes the field ``self``. """
desc = {'type': self.type}
for attr, prop in self.description_attrs:
value = getattr(self, prop)
if callable(value):
value = value(env)
if value is not None:
desc[attr] = value
return desc
# properties used by get_description()
_description_store = property(attrgetter('store'))
_description_manual = property(attrgetter('manual'))
_description_depends = property(attrgetter('depends'))
_description_related = property(attrgetter('related'))
_description_company_dependent = property(attrgetter('company_dependent'))
_description_readonly = property(attrgetter('readonly'))
_description_required = property(attrgetter('required'))
_description_states = property(attrgetter('states'))
_description_groups = property(attrgetter('groups'))
_description_change_default = property(attrgetter('change_default'))
_description_deprecated = property(attrgetter('deprecated'))
_description_group_operator = property(attrgetter('group_operator'))
@property
def _description_searchable(self):
return bool(self.store or self.search)
@property
def _description_sortable(self):
return (self.column_type and self.store) or (self.inherited and self.related_field._description_sortable)
def _description_string(self, env):
if self.string and env.lang:
model_name = self.base_field.model_name
field_string = env['ir.translation'].get_field_string(model_name)
return field_string.get(self.name) or self.string
return self.string
def _description_help(self, env):
if self.help and env.lang:
model_name = self.base_field.model_name
field_help = env['ir.translation'].get_field_help(model_name)
return field_help.get(self.name) or self.help
return self.help
def is_editable(self):
""" Return whether the field can be editable in a view. """
return not self.readonly or self.states and any(
'readonly' in item for items in self.states.values() for item in items
)
############################################################################
#
# Conversion of values
#
def null(self, record):
""" Return the null value for this field in the record format. """
return False
def convert_to_column(self, value, record, values=None, validate=True):
""" Convert ``value`` from the ``write`` format to the SQL format. """
if value is None or value is False:
return None
return pycompat.to_text(value)
def convert_to_cache(self, value, record, validate=True):
""" Convert ``value`` to the cache format; ``value`` may come from an
assignment, or have the format of methods :meth:`BaseModel.read` or
:meth:`BaseModel.write`. If the value represents a recordset, it should
be added for prefetching on ``record``.
:param bool validate: when True, field-specific validation of ``value``
will be performed
"""
return value
def convert_to_record(self, value, record):
""" Convert ``value`` from the cache format to the record format.
If the value represents a recordset, it should share the prefetching of
``record``.
"""
return False if value is None else value
def convert_to_record_multi(self, values, records):
""" Convert a list of values from the cache format to the record format.
Some field classes may override this method to add optimizations for
batch processing.
"""
# spare the method lookup overhead
convert = self.convert_to_record
return [convert(value, records) for value in values]
def convert_to_read(self, value, record, use_name_get=True):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.read`.
:param bool use_name_get: when True, the value's display name will be
computed using :meth:`BaseModel.name_get`, if relevant for the field
"""
return False if value is None else value
def convert_to_write(self, value, record):
""" Convert ``value`` from any format to the format of method
:meth:`BaseModel.write`.
"""
cache_value = self.convert_to_cache(value, record, validate=False)
record_value = self.convert_to_record(cache_value, record)
return self.convert_to_read(record_value, record)
def convert_to_onchange(self, value, record, names):
""" Convert ``value`` from the record format to the format returned by
method :meth:`BaseModel.onchange`.
:param names: a tree of field names (for relational fields only)
"""
return self.convert_to_read(value, record)
def convert_to_export(self, value, record):
""" Convert ``value`` from the record format to the export format. """
if not value:
return ''
return value
def convert_to_display_name(self, value, record):
""" Convert ``value`` from the record format to a suitable display name. """
return ustr(value)
############################################################################
#
# Update database schema
#
def update_db(self, model, columns):
""" Update the database schema to implement this field.
:param model: an instance of the field's model
:param columns: a dict mapping column names to their configuration in database
:return: ``True`` if the field must be recomputed on existing rows
"""
if not self.column_type:
return
column = columns.get(self.name)
# create/update the column, not null constraint; the index will be
# managed by registry.check_indexes()
self.update_db_column(model, column)
self.update_db_notnull(model, column)
# optimization for computing simple related fields like 'foo_id.bar'
if (
not column
and len(self.related or ()) == 2
and self.related_field.store and not self.related_field.compute
and not (self.related_field.type == 'binary' and self.related_field.attachment)
and self.related_field.type not in ('one2many', 'many2many')
):
join_field = model._fields[self.related[0]]
if (
join_field.type == 'many2one'
and join_field.store and not join_field.compute
):
model.pool.post_init(self.update_db_related, model)
# discard the "classical" computation
return False
return not column
def update_db_column(self, model, column):
""" Create/update the column corresponding to ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
if not column:
# the column does not exist, create it
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
return
if column['udt_name'] == self.column_type[0]:
return
if column['udt_name'] in self.column_cast_from:
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
else:
newname = (self.name + '_moved{}').format
i = 0
while sql.column_exists(model._cr, model._table, newname(i)):
i += 1
if column['is_nullable'] == 'NO':
sql.drop_not_null(model._cr, model._table, self.name)
sql.rename_column(model._cr, model._table, self.name, newname(i))
sql.create_column(model._cr, model._table, self.name, self.column_type[1], self.string)
def update_db_notnull(self, model, column):
""" Add or remove the NOT NULL constraint on ``self``.
:param model: an instance of the field's model
:param column: the column's configuration (dict) if it exists, or ``None``
"""
has_notnull = column and column['is_nullable'] == 'NO'
if not column or (self.required and not has_notnull):
# the column is new or it becomes required; initialize its values
if model._table_has_rows():
model._init_column(self.name)
if self.required and not has_notnull:
# _init_column may delay computations in post-init phase
@model.pool.post_init
def add_not_null():
# flush values before adding NOT NULL constraint
model.flush([self.name])
model.pool.post_constraint(apply_required, model, self.name)
elif not self.required and has_notnull:
sql.drop_not_null(model._cr, model._table, self.name)
def update_db_related(self, model):
""" Compute a stored related field directly in SQL. """
comodel = model.env[self.related_field.model_name]
model.env.cr.execute("""
UPDATE "{model_table}" AS x
SET "{model_field}" = y."{comodel_field}"
FROM "{comodel_table}" AS y
WHERE x."{join_field}" = y.id
""".format(
model_table=model._table,
model_field=self.name,
comodel_table=comodel._table,
comodel_field=self.related[1],
join_field=self.related[0],
))
############################################################################
#
# Alternatively stored fields: if fields don't have a `column_type` (not
# stored as regular db columns) they go through a read/create/write
# protocol instead
#
def read(self, records):
""" Read the value of ``self`` on ``records``, and store it in cache. """
raise NotImplementedError("Method read() undefined on %s" % self)
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
for record, value in record_values:
self.write(record, value)
def write(self, records, value):
""" Write the value of ``self`` on ``records``. This method must update
the cache and prepare database updates.
:param value: a value in any format
:return: the subset of `records` that have been modified
"""
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
record = records[:1]
write_value = self.convert_to_write(cache_value, record)
column_value = self.convert_to_column(write_value, record)
for record in records.filtered('id'):
towrite[record.id][self.name] = column_value
return records
############################################################################
#
# Descriptor methods
#
def __get__(self, record, owner):
""" return the value of field ``self`` on ``record`` """
if record is None:
return self # the field is accessed through the owner class
if not record._ids:
# null record -> return the null value for this field
value = self.convert_to_cache(False, record, validate=False)
return self.convert_to_record(value, record)
env = record.env
# only a single record may be accessed
record.ensure_one()
if self.compute and self.store:
# process pending computations
self.recompute(record)
try:
value = env.cache.get(record, self)
except KeyError:
# behavior in case of cache miss:
#
# on a real record:
# stored -> fetch from database (computation done above)
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/ origin:
# stored and not (computed and readonly) -> fetch from origin
# stored and computed and readonly -> compute
# not stored and computed -> compute
# not stored and not computed -> default
#
# on a new record w/o origin:
# stored and computed -> compute
# stored and not computed -> new delegate or default
# not stored and computed -> compute
# not stored and not computed -> default
#
if self.store and record.id:
# real record: fetch from database
recs = record._in_cache_without(self)
try:
recs._fetch_field(self)
except AccessError:
record._fetch_field(self)
if not env.cache.contains(record, self) and not record.exists():
raise MissingError("\n".join([
_("Record does not exist or has been deleted."),
_("(Record: %s, User: %s)") % (record, env.uid),
]))
value = env.cache.get(record, self)
elif self.store and record._origin and not (self.compute and self.readonly):
# new record with origin: fetch from origin
value = self.convert_to_cache(record._origin[self.name], record)
env.cache.set(record, self, value)
elif self.compute:
# non-stored field or new record without origin: compute
if env.is_protected(self, record):
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
else:
recs = record if self.recursive else record._in_cache_without(self)
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
try:
value = env.cache.get(record, self)
except CacheMiss:
if self.readonly and not self.store:
raise ValueError("Compute method failed to assign %s.%s" % (record, self.name))
# fallback to null value if compute gives nothing
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
elif self.type == 'many2one' and self.delegate and not record.id:
# parent record of a new record: new record, with the same
# values as record for the corresponding inherited fields
def is_inherited_field(name):
field = record._fields[name]
return field.inherited and field.related[0] == self.name
parent = record.env[self.comodel_name].new({
name: value
for name, value in record._cache.items()
if is_inherited_field(name)
})
value = self.convert_to_cache(parent, record)
env.cache.set(record, self, value)
else:
# non-stored field or stored field on new record: default value
value = self.convert_to_cache(False, record, validate=False)
env.cache.set(record, self, value)
defaults = record.default_get([self.name])
if self.name in defaults:
# The null value above is necessary to convert x2many field
# values. For instance, converting [(4, id)] accesses the
# field's current value, then adds the given id. Without an
# initial value, the conversion ends up here to determine
# the field's value, and generates an infinite recursion.
value = self.convert_to_cache(defaults[self.name], record)
env.cache.set(record, self, value)
return self.convert_to_record(value, record)
def mapped(self, records):
""" Return the values of ``self`` for ``records``, either as a list
(scalar fields), or as a recordset (relational fields).
This method is meant to be used internally and has very little benefit
over a simple call to `~odoo.models.BaseModel.mapped()` on a recordset.
"""
if self.name == 'id':
# not stored in cache
return list(records._ids)
if self.compute and self.store:
# process pending computations
self.recompute(records)
# retrieve values in cache, and fetch missing ones
vals = records.env.cache.get_until_miss(records, self)
while len(vals) < len(records):
# It is important to construct a 'remaining' recordset with the
# _prefetch_ids of the original recordset, in order to prefetch as
# many records as possible. If not done this way, scenarios such as
# [rec.line_ids.mapped('name') for rec in recs] would generate one
# query per record in `recs`!
remaining = records._browse(records.env, records[len(vals):]._ids, records._prefetch_ids)
self.__get__(first(remaining), type(remaining))
vals += records.env.cache.get_until_miss(remaining, self)
return self.convert_to_record_multi(vals, records)
def __set__(self, records, value):
""" set the value of field ``self`` on ``records`` """
protected_ids = []
new_ids = []
other_ids = []
for record_id in records._ids:
if record_id in records.env._protected.get(self, ()):
protected_ids.append(record_id)
elif not record_id:
new_ids.append(record_id)
else:
other_ids.append(record_id)
if protected_ids:
# records being computed: no business logic, no recomputation
protected_records = records.browse(protected_ids)
self.write(protected_records, value)
if new_ids:
# new records: no business logic
new_records = records.browse(new_ids)
with records.env.protecting(records.pool.field_computed.get(self, [self]), records):
if self.relational:
new_records.modified([self.name], before=True)
self.write(new_records, value)
new_records.modified([self.name])
if self.inherited:
# special case: also assign parent records if they are new
parents = records[self.related[0]]
parents.filtered(lambda r: not r.id)[self.name] = value
if other_ids:
# base case: full business logic
records = records.browse(other_ids)
write_value = self.convert_to_write(value, records)
records.write({self.name: write_value})
############################################################################
#
# Computation of field values
#
def recompute(self, records):
""" Process the pending computations of ``self`` on ``records``. This
should be called only if ``self`` is computed and stored.
"""
to_compute_ids = records.env.all.tocompute.get(self)
if not to_compute_ids:
return
if self.recursive:
for record in records:
if record.id in to_compute_ids:
self.compute_value(record)
return
for record in records:
if record.id in to_compute_ids:
ids = expand_ids(record.id, to_compute_ids)
recs = record.browse(itertools.islice(ids, PREFETCH_MAX))
try:
self.compute_value(recs)
except (AccessError, MissingError):
self.compute_value(record)
def compute_value(self, records):
""" Invoke the compute method on ``records``; the results are in cache. """
env = records.env
if self.compute_sudo:
records = records.sudo()
fields = records.pool.field_computed[self]
# Just in case the compute method does not assign a value, we already
# mark the computation as done. This is also necessary if the compute
# method accesses the old value of the field: the field will be fetched
# with _read(), which will flush() it. If the field is still to compute,
# the latter flush() will recursively compute this field!
for field in fields:
if field.store:
env.remove_to_compute(field, records)
try:
with records.env.protecting(fields, records):
records._compute_field_value(self)
except Exception:
for field in fields:
if field.store:
env.add_to_compute(field, records)
raise
def determine_inverse(self, records):
""" Given the value of ``self`` on ``records``, inverse the computation. """
if isinstance(self.inverse, str):
getattr(records, self.inverse)()
else:
self.inverse(records)
def determine_domain(self, records, operator, value):
""" Return a domain representing a condition on ``self``. """
if isinstance(self.search, str):
return getattr(records, self.search)(operator, value)
else:
return self.search(records, operator, value)
############################################################################
#
# Notification when fields are modified
#
class Boolean(Field):
""" Encapsulates a :class:`bool`. """
type = 'boolean'
column_type = ('bool', 'bool')
def convert_to_column(self, value, record, values=None, validate=True):
return bool(value)
def convert_to_cache(self, value, record, validate=True):
return bool(value)
def convert_to_export(self, value, record):
return value
class Integer(Field):
""" Encapsulates an :class:`int`. """
type = 'integer'
column_type = ('int4', 'int4')
group_operator = 'sum'
def convert_to_column(self, value, record, values=None, validate=True):
return int(value or 0)
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, dict):
# special case, when an integer field is used as inverse for a one2many
return value.get('id', None)
return int(value or 0)
def convert_to_record(self, value, record):
return value or 0
def convert_to_read(self, value, record, use_name_get=True):
# Integer values greater than 2^31-1 are not supported in pure XMLRPC,
# so we have to pass them as floats :-(
if value and value > MAXINT:
return float(value)
return value
def _update(self, records, value):
# special case, when an integer field is used as inverse for a one2many
cache = records.env.cache
for record in records:
cache.set(record, self, value.id or 0)
def convert_to_export(self, value, record):
if value or value == 0:
return value
return ''
class Float(Field):
""" Encapsulates a :class:`float`.
The precision digits are given by the (optional) ``digits`` attribute.
:param digits: a pair (total, decimal) or a string referencing a
:class:`~odoo.addons.base.models.decimal_precision.DecimalPrecision` record name.
:type digits: tuple(int,int) or str
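    For example (the precision name is illustrative)::
        margin = fields.Float(digits=(16, 2))
        weight = fields.Float(digits='Stock Weight')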
    When a float is a quantity associated with a unit of measure, it is important
to use the right tool to compare or round values with the correct precision.
The Float class provides some static methods for this purpose:
:func:`~odoo.fields.Float.round()` to round a float with the given precision.
:func:`~odoo.fields.Float.is_zero()` to check if a float equals zero at the given precision.
:func:`~odoo.fields.Float.compare()` to compare two floats at the given precision.
.. admonition:: Example
        To round a quantity with the precision of the unit of measure::
fields.Float.round(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
        To check if the quantity is zero with the precision of the unit of measure::
fields.Float.is_zero(self.product_uom_qty, precision_rounding=self.product_uom_id.rounding)
To compare two quantities::
            fields.Float.compare(self.product_uom_qty, self.qty_done, precision_rounding=self.product_uom_id.rounding)
    The compare helper uses the __cmp__ semantics for historical reasons, so the
    proper, idiomatic way to use this helper is:
if result == 0, the first and second floats are equal
if result < 0, the first float is lower than the second
if result > 0, the first float is greater than the second
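    For instance (the variable names are illustrative)::
        if fields.Float.compare(qty_done, qty_ordered, precision_rounding=uom.rounding) >= 0:
            pass  # qty_done is greater than or equal to qty_ordered at that precision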
"""
type = 'float'
column_cast_from = ('int4', 'numeric', 'float8')
_digits = None # digits argument passed to class initializer
group_operator = 'sum'
def __init__(self, string=Default, digits=Default, **kwargs):
super(Float, self).__init__(string=string, _digits=digits, **kwargs)
@property
def column_type(self):
# Explicit support for "falsy" digits (0, False) to indicate a NUMERIC
# field with no fixed precision. The values are saved in the database
# with all significant digits.
# FLOAT8 type is still the default when there is no precision because it
# is faster for most operations (sums, etc.)
return ('numeric', 'numeric') if self._digits is not None else \
('float8', 'double precision')
def get_digits(self, env):
if isinstance(self._digits, str):
precision = env['decimal.precision'].precision_get(self._digits)
return 16, precision
else:
return self._digits
_related__digits = property(attrgetter('_digits'))
def _description_digits(self, env):
return self.get_digits(env)
def convert_to_column(self, value, record, values=None, validate=True):
result = float(value or 0.0)
digits = self.get_digits(record.env)
if digits:
precision, scale = digits
result = float_repr(float_round(result, precision_digits=scale), precision_digits=scale)
return result
def convert_to_cache(self, value, record, validate=True):
# apply rounding here, otherwise value in cache may be wrong!
value = float(value or 0.0)
if not validate:
return value
digits = self.get_digits(record.env)
return float_round(value, precision_digits=digits[1]) if digits else value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_export(self, value, record):
if value or value == 0.0:
return value
return ''
round = staticmethod(float_round)
is_zero = staticmethod(float_is_zero)
compare = staticmethod(float_compare)
class Monetary(Field):
""" Encapsulates a :class:`float` expressed in a given
:class:`res_currency<odoo.addons.base.models.res_currency.Currency>`.
The decimal precision and currency symbol are taken from the ``currency_field`` attribute.
:param str currency_field: name of the :class:`Many2one` field
holding the :class:`res_currency <odoo.addons.base.models.res_currency.Currency>`
this monetary field is expressed in (default: `\'currency_id\'`)
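    For example (the field name is illustrative)::
        amount_company = fields.Monetary(currency_field='company_currency_id')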
"""
type = 'monetary'
column_type = ('numeric', 'numeric')
column_cast_from = ('float8',)
currency_field = None
group_operator = 'sum'
def __init__(self, string=Default, currency_field=Default, **kwargs):
super(Monetary, self).__init__(string=string, currency_field=currency_field, **kwargs)
_description_currency_field = property(attrgetter('currency_field'))
def _setup_currency_field(self, model):
if not self.currency_field:
# pick a default, trying in order: 'currency_id', 'x_currency_id'
if 'currency_id' in model._fields:
self.currency_field = 'currency_id'
elif 'x_currency_id' in model._fields:
self.currency_field = 'x_currency_id'
assert self.currency_field in model._fields, \
"Field %s with unknown currency_field %r" % (self, self.currency_field)
def _setup_regular_full(self, model):
super(Monetary, self)._setup_regular_full(model)
self._setup_currency_field(model)
def _setup_related_full(self, model):
super(Monetary, self)._setup_related_full(model)
if self.inherited:
self.currency_field = self.related_field.currency_field
self._setup_currency_field(model)
def convert_to_column(self, value, record, values=None, validate=True):
# retrieve currency from values or record
if values and self.currency_field in values:
field = record._fields[self.currency_field]
currency = field.convert_to_cache(values[self.currency_field], record, validate)
currency = field.convert_to_record(currency, record)
else:
# Note: this is wrong if 'record' is several records with different
# currencies, which is functional nonsense and should not happen
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency = record[:1].with_context(prefetch_fields=False)[self.currency_field]
value = float(value or 0.0)
if currency:
return float_repr(currency.round(value), currency.decimal_places)
return value
def convert_to_cache(self, value, record, validate=True):
# cache format: float
value = float(value or 0.0)
if value and validate:
# FIXME @rco-odoo: currency may not be already initialized if it is
# a function or related field!
# BEWARE: do not prefetch other fields, because 'value' may be in
# cache, and would be overridden by the value read from database!
currency = record.sudo().with_context(prefetch_fields=False)[self.currency_field]
if len(currency) > 1:
raise ValueError("Got multiple currencies while assigning values of monetary field %s" % str(self))
elif currency:
value = currency.round(value)
return value
def convert_to_record(self, value, record):
return value or 0.0
def convert_to_read(self, value, record, use_name_get=True):
return value
def convert_to_write(self, value, record):
return value
class _String(Field):
""" Abstract class for string fields. """
translate = False # whether the field is translated
prefetch = None
def __init__(self, string=Default, **kwargs):
# translate is either True, False, or a callable
if 'translate' in kwargs and not callable(kwargs['translate']):
kwargs['translate'] = bool(kwargs['translate'])
super(_String, self).__init__(string=string, **kwargs)
def _setup_attrs(self, model, name):
super()._setup_attrs(model, name)
if self.prefetch is None:
# do not prefetch complex translated fields by default
self.prefetch = not callable(self.translate)
_related_translate = property(attrgetter('translate'))
def _description_translate(self, env):
return bool(self.translate)
def get_trans_terms(self, value):
""" Return the sequence of terms to translate found in `value`. """
if not callable(self.translate):
return [value] if value else []
terms = []
self.translate(terms.append, value)
return terms
def get_trans_func(self, records):
""" Return a translation function `translate` for `self` on the given
records; the function call `translate(record_id, value)` translates the
field value to the language given by the environment of `records`.
"""
if callable(self.translate):
rec_src_trans = records.env['ir.translation']._get_terms_translations(self, records)
def translate(record_id, value):
src_trans = rec_src_trans[record_id]
return self.translate(src_trans.get, value)
else:
rec_trans = records.env['ir.translation']._get_ids(
'%s,%s' % (self.model_name, self.name), 'model', records.env.lang, records.ids)
def translate(record_id, value):
return rec_trans.get(record_id) or value
return translate
def check_trans_value(self, value):
""" Check and possibly sanitize the translated term `value`. """
if callable(self.translate):
# do a "no-translation" to sanitize the value
callback = lambda term: None
return self.translate(callback, value)
else:
return value
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
cache.update(records, self, [cache_value] * len(records))
if not self.store:
return records
real_recs = records.filtered('id')
if not real_recs._ids:
return records
update_column = True
update_trans = False
single_lang = len(records.env['res.lang'].get_installed()) <= 1
if self.translate:
lang = records.env.lang or None # used in _update_translations below
if single_lang:
# a single language is installed
update_trans = True
elif callable(self.translate) or lang == 'en_US':
# update the source and synchronize translations
update_column = True
update_trans = True
elif lang != 'en_US' and lang is not None:
                # update only the translations, except when emptying the value
                # (then the column is updated as well)
update_column = not cache_value
update_trans = True
# else: lang = None
# update towrite if modifying the source
if update_column:
towrite = records.env.all.towrite[self.model_name]
for rid in real_recs._ids:
# cache_value is already in database format
towrite[rid][self.name] = cache_value
if self.translate is True and cache_value:
tname = "%s,%s" % (records._name, self.name)
records.env['ir.translation']._set_source(tname, real_recs._ids, value)
if self.translate:
# invalidate the field in the other languages
cache.invalidate([(self, records.ids)])
cache.update(records, self, [cache_value] * len(records))
if update_trans:
if callable(self.translate):
# the source value of self has been updated, synchronize
# translated terms when possible
records.env['ir.translation']._sync_terms_translations(self, real_recs)
else:
# update translations
value = self.convert_to_column(value, records)
source_recs = real_recs.with_context(lang=None)
source_value = first(source_recs)[self.name]
if not source_value:
source_recs[self.name] = value
source_value = value
tname = "%s,%s" % (self.model_name, self.name)
if not value:
records.env['ir.translation'].search([
('name', '=', tname),
('type', '=', 'model'),
('res_id', 'in', real_recs._ids)
]).unlink()
elif single_lang:
records.env['ir.translation']._update_translations([dict(
src=source_value,
value=value,
name=tname,
lang=lang,
type='model',
state='translated',
res_id=res_id) for res_id in real_recs._ids])
else:
records.env['ir.translation']._set_ids(
tname, 'model', lang, real_recs._ids, value, source_value,
)
return records
class Char(_String):
""" Basic string field, can be length-limited, usually displayed as a
single-line string in clients.
:param int size: the maximum size of values stored for that field
:param bool trim: states whether the value is trimmed or not (by default,
``True``). Note that the trim operation is applied only by the web client.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
"""
type = 'char'
column_cast_from = ('text',)
size = None # maximum size of values (deprecated)
trim = True # whether value is trimmed (only by web client)
@property
def column_type(self):
return ('varchar', pg_varchar(self.size))
def update_db_column(self, model, column):
if (
column and column['udt_name'] == 'varchar' and column['character_maximum_length'] and
(self.size is None or column['character_maximum_length'] < self.size)
):
# the column's varchar size does not match self.size; convert it
sql.convert_column(model._cr, model._table, self.name, self.column_type[1])
super(Char, self).update_db_column(model, column)
_related_size = property(attrgetter('size'))
_related_trim = property(attrgetter('trim'))
_description_size = property(attrgetter('size'))
_description_trim = property(attrgetter('trim'))
def _setup_regular_base(self, model):
super(Char, self)._setup_regular_base(model)
assert self.size is None or isinstance(self.size, int), \
"Char field %s with non-integer size %r" % (self, self.size)
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
# we need to convert the string to a unicode object to be able
# to evaluate its length (and possibly truncate it) reliably
return pycompat.to_text(value)[:self.size]
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return pycompat.to_text(value)[:self.size]
class Text(_String):
""" Very similar to :class:`Char` but used for longer contents, does not
have a size and usually displayed as a multiline text box.
:param translate: enable the translation of the field's values; use
``translate=True`` to translate field values as a whole; ``translate``
may also be a callable such that ``translate(callback, value)``
translates ``value`` by using ``callback(term)`` to retrieve the
translation of terms.
:type translate: bool or callable
"""
type = 'text'
column_type = ('text', 'text')
column_cast_from = ('varchar',)
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
return ustr(value)
class Html(_String):
""" Encapsulates an html code content.
:param bool sanitize: whether value must be sanitized (default: ``True``)
:param bool sanitize_tags: whether to sanitize tags
        (only a white list of tags is accepted, default: ``True``)
:param bool sanitize_attributes: whether to sanitize attributes
(only a white list of attributes is accepted, default: ``True``)
:param bool sanitize_style: whether to sanitize style attributes (default: ``False``)
:param bool strip_style: whether to strip style attributes
(removed and therefore not sanitized, default: ``False``)
:param bool strip_classes: whether to strip classes attributes (default: ``False``)
"""
type = 'html'
column_type = ('text', 'text')
sanitize = True # whether value must be sanitized
    sanitize_tags = True  # whether to sanitize tags (only a white list of tags is accepted)
sanitize_attributes = True # whether to sanitize attributes (only a white list of attributes is accepted)
sanitize_style = False # whether to sanitize style attributes
sanitize_form = True # whether to sanitize forms
strip_style = False # whether to strip style attributes (removed and therefore not sanitized)
strip_classes = False # whether to strip classes attributes
def _get_attrs(self, model, name):
# called by _setup_attrs(), working together with _String._setup_attrs()
attrs = super()._get_attrs(model, name)
# Translated sanitized html fields must use html_translate or a callable.
if attrs.get('translate') is True and attrs.get('sanitize', True):
attrs['translate'] = html_translate
return attrs
_related_sanitize = property(attrgetter('sanitize'))
_related_sanitize_tags = property(attrgetter('sanitize_tags'))
_related_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_related_sanitize_style = property(attrgetter('sanitize_style'))
_related_strip_style = property(attrgetter('strip_style'))
_related_strip_classes = property(attrgetter('strip_classes'))
_description_sanitize = property(attrgetter('sanitize'))
_description_sanitize_tags = property(attrgetter('sanitize_tags'))
_description_sanitize_attributes = property(attrgetter('sanitize_attributes'))
_description_sanitize_style = property(attrgetter('sanitize_style'))
_description_strip_style = property(attrgetter('strip_style'))
_description_strip_classes = property(attrgetter('strip_classes'))
def convert_to_column(self, value, record, values=None, validate=True):
if value is None or value is False:
return None
if self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
def convert_to_cache(self, value, record, validate=True):
if value is None or value is False:
return None
if validate and self.sanitize:
return html_sanitize(
value, silent=True,
sanitize_tags=self.sanitize_tags,
sanitize_attributes=self.sanitize_attributes,
sanitize_style=self.sanitize_style,
sanitize_form=self.sanitize_form,
strip_style=self.strip_style,
strip_classes=self.strip_classes)
return value
class Date(Field):
""" Encapsulates a python :class:`date <datetime.date>` object. """
type = 'date'
column_type = ('date', 'date')
column_cast_from = ('timestamp',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def today(*args):
"""Return the current day in the format expected by the ORM.
.. note:: This function may be used to compute default values.
"""
return date.today()
@staticmethod
def context_today(record, timestamp=None):
"""Return the current date as seen in the client's timezone in a format
fit for date fields.
.. note:: This method may be used to compute default values.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: optional datetime value to use instead of
the current date and time (must be a datetime, regular dates
can't be converted between timezones).
:rtype: date
"""
today = timestamp or datetime.now()
context_today = None
tz_name = record._context.get('tz') or record.env.user.tz
if tz_name:
try:
today_utc = pytz.timezone('UTC').localize(today, is_dst=False) # UTC = no DST
context_today = today_utc.astimezone(pytz.timezone(tz_name))
except Exception:
_logger.debug("failed to compute context/client-specific today date, using UTC value for `today`",
exc_info=True)
return (context_today or today).date()
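    # Illustrative usage sketch: a common default computed in the client's
    # timezone; 'self' is the model on which the hypothetical field is declared.
    #
    #     date_order = fields.Date(default=lambda self: fields.Date.context_today(self))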
@staticmethod
def to_date(value):
"""Attempt to convert ``value`` to a :class:`date` object.
.. warning::
If a datetime object is given as value,
it will be converted to a date object and all
datetime-specific information will be lost (HMS, TZ, ...).
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: date or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
return value.date()
return value
value = value[:DATE_LENGTH]
return datetime.strptime(value, DATE_FORMAT).date()
# kept for backwards compatibility, but consider `from_string` as deprecated, will probably
# be removed after V12
from_string = to_date
@staticmethod
def to_string(value):
"""
Convert a :class:`date` or :class:`datetime` object to a string.
:param value: value to convert.
        :return: a string representing ``value`` in the server's date format; if ``value`` is of
            type :class:`datetime`, the hours, minutes, seconds and tzinfo will be truncated.
:rtype: str
"""
return value.strftime(DATE_FORMAT) if value else False
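    # Illustrative round trip with the helpers above (values are examples):
    #
    #     d = fields.Date.to_date('2021-06-01')    # -> datetime.date(2021, 6, 1)
    #     s = fields.Date.to_string(d)             # -> '2021-06-01'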
def convert_to_cache(self, value, record, validate=True):
if not value:
return None
if isinstance(value, datetime):
# TODO: better fix data files (crm demo data)
value = value.date()
# raise TypeError("%s (field %s) must be string or date, not datetime." % (value, self))
return self.to_date(value)
def convert_to_export(self, value, record):
if not value:
return ''
return self.from_string(value)
class Datetime(Field):
""" Encapsulates a python :class:`datetime <datetime.datetime>` object. """
type = 'datetime'
column_type = ('timestamp', 'timestamp')
column_cast_from = ('date',)
start_of = staticmethod(date_utils.start_of)
end_of = staticmethod(date_utils.end_of)
add = staticmethod(date_utils.add)
subtract = staticmethod(date_utils.subtract)
@staticmethod
def now(*args):
"""Return the current day and time in the format expected by the ORM.
.. note:: This function may be used to compute default values.
"""
# microseconds must be annihilated as they don't comply with the server datetime format
return datetime.now().replace(microsecond=0)
@staticmethod
def today(*args):
"""Return the current day, at midnight (00:00:00)."""
return Datetime.now().replace(hour=0, minute=0, second=0)
@staticmethod
def context_timestamp(record, timestamp):
"""Return the given timestamp converted to the client's timezone.
.. note:: This method is *not* meant for use as a default initializer,
because datetime fields are automatically converted upon
display on client side. For default values, :meth:`now`
should be used instead.
:param record: recordset from which the timezone will be obtained.
:param datetime timestamp: naive datetime value (expressed in UTC)
to be converted to the client timezone.
:return: timestamp converted to timezone-aware datetime in context timezone.
:rtype: datetime
"""
assert isinstance(timestamp, datetime), 'Datetime instance expected'
tz_name = record._context.get('tz') or record.env.user.tz
utc_timestamp = pytz.utc.localize(timestamp, is_dst=False) # UTC = no DST
if tz_name:
try:
context_tz = pytz.timezone(tz_name)
return utc_timestamp.astimezone(context_tz)
except Exception:
_logger.debug("failed to compute context/client-specific timestamp, "
"using the UTC value",
exc_info=True)
return utc_timestamp
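    # Illustrative usage sketch: converting a stored (naive UTC) datetime to the
    # user's timezone, e.g. when rendering it in a report.
    #
    #     local_dt = fields.Datetime.context_timestamp(record, record.create_date)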
@staticmethod
def to_datetime(value):
"""Convert an ORM ``value`` into a :class:`datetime` value.
:param value: value to convert.
:type value: str or date or datetime
:return: an object representing ``value``.
:rtype: datetime or None
"""
if not value:
return None
if isinstance(value, date):
if isinstance(value, datetime):
if value.tzinfo:
raise ValueError("Datetime field expects a naive datetime: %s" % value)
return value
return datetime.combine(value, time.min)
# TODO: fix data files
return datetime.strptime(value, DATETIME_FORMAT[:len(value)-2])
# kept for backwards compatibility, but consider `from_string` as deprecated, will probably
# be removed after V12
from_string = to_datetime
@staticmethod
def to_string(value):
"""Convert a :class:`datetime` or :class:`date` object to a string.
:param value: value to convert.
:type value: datetime or date
        :return: a string representing ``value`` in the server's datetime format;
if ``value`` is of type :class:`date`,
the time portion will be midnight (00:00:00).
:rtype: str
"""
return value.strftime(DATETIME_FORMAT) if value else False
def convert_to_cache(self, value, record, validate=True):
return self.to_datetime(value)
def convert_to_export(self, value, record):
if not value:
return ''
value = self.convert_to_display_name(value, record)
return self.from_string(value)
def convert_to_display_name(self, value, record):
assert record, 'Record expected'
return Datetime.to_string(Datetime.context_timestamp(record, Datetime.from_string(value)))
# http://initd.org/psycopg/docs/usage.html#binary-adaptation
# Received data is returned as buffer (in Python 2) or memoryview (in Python 3).
_BINARY = memoryview
class Binary(Field):
"""Encapsulates a binary content (e.g. a file).
:param bool attachment: whether the field should be stored as `ir_attachment`
or in a column of the model's table (default: ``True``).
"""
type = 'binary'
prefetch = False # not prefetched by default
_depends_context = ('bin_size',) # depends on context (content or size)
attachment = True # whether value is stored in attachment
@property
def column_type(self):
return None if self.attachment else ('bytea', 'bytea')
def _get_attrs(self, model, name):
attrs = super(Binary, self)._get_attrs(model, name)
if not attrs.get('store', True):
attrs['attachment'] = False
return attrs
_description_attachment = property(attrgetter('attachment'))
def convert_to_column(self, value, record, values=None, validate=True):
# Binary values may be byte strings (python 2.6 byte array), but
# the legacy OpenERP convention is to transfer and store binaries
# as base64-encoded strings. The base64 string may be provided as a
# unicode in some circumstances, hence the str() cast here.
# This str() coercion will only work for pure ASCII unicode strings,
# on purpose - non base64 data must be passed as a 8bit byte strings.
if not value:
return None
# Detect if the binary content is an SVG for restricting its upload
# only to system users.
magic_bytes = {
b'P', # first 6 bits of '<' (0x3C) b64 encoded
b'<', # plaintext XML tag opening
}
if isinstance(value, str):
value = value.encode()
if value[:1] in magic_bytes:
try:
decoded_value = base64.b64decode(value.translate(None, delete=b'\r\n'), validate=True)
except binascii.Error:
decoded_value = value
# Full mimetype detection
if (guess_mimetype(decoded_value).startswith('image/svg') and
not record.env.is_system()):
raise UserError(_("Only admins can upload SVG files."))
if isinstance(value, bytes):
return psycopg2.Binary(value)
try:
return psycopg2.Binary(str(value).encode('ascii'))
except UnicodeEncodeError:
raise UserError(_("ASCII characters are required for %s in %s") % (value, self.name))
def convert_to_cache(self, value, record, validate=True):
if isinstance(value, _BINARY):
return bytes(value)
if isinstance(value, str):
# the cache must contain bytes or memoryview, but sometimes a string
# is given when assigning a binary field (test `TestFileSeparator`)
return value.encode()
if isinstance(value, int) and \
(record._context.get('bin_size') or
record._context.get('bin_size_' + self.name)):
# If the client requests only the size of the field, we return that
# instead of the content. Presumably a separate request will be done
# to read the actual content, if necessary.
value = human_size(value)
# human_size can return False (-> None) or a string (-> encoded)
return value.encode() if value else None
return None if value is False else value
def convert_to_record(self, value, record):
if isinstance(value, _BINARY):
return bytes(value)
return False if value is None else value
def compute_value(self, records):
bin_size_name = 'bin_size_' + self.name
if records.env.context.get('bin_size') or records.env.context.get(bin_size_name):
# always compute without bin_size
records_no_bin_size = records.with_context(**{'bin_size': False, bin_size_name: False})
super().compute_value(records_no_bin_size)
# manually update the bin_size cache
cache = records.env.cache
for record_no_bin_size, record in zip(records_no_bin_size, records):
try:
value = cache.get(record_no_bin_size, self)
try:
value = base64.b64decode(value)
except (TypeError, binascii.Error):
pass
try:
if isinstance(value, (bytes, _BINARY)):
value = human_size(len(value))
except (TypeError):
pass
cache_value = self.convert_to_cache(value, record)
cache.set(record, self, cache_value)
except CacheMiss:
pass
else:
super().compute_value(records)
def read(self, records):
# values are stored in attachments, retrieve them
assert self.attachment
domain = [
('res_model', '=', records._name),
('res_field', '=', self.name),
('res_id', 'in', records.ids),
]
# Note: the 'bin_size' flag is handled by the field 'datas' itself
data = {
att.res_id: att.datas
for att in records.env['ir.attachment'].sudo().search(domain)
}
cache = records.env.cache
for record in records:
cache.set(record, self, data.get(record.id, False))
def create(self, record_values):
assert self.attachment
if not record_values:
return
# create the attachments that store the values
env = record_values[0][0].env
with env.norecompute():
env['ir.attachment'].sudo().with_context(
binary_field_real_user=env.user,
).create([{
'name': self.name,
'res_model': self.model_name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record, value in record_values
if value
])
def write(self, records, value):
if not self.attachment:
return super().write(records, value)
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# update the cache, and discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
if self.store:
# determine records that are known to be not null
not_null = cache.get_records_different_from(records, self, None)
cache.update(records, self, [cache_value] * len(records))
# retrieve the attachments that store the values, and adapt them
if self.store and any(records._ids):
real_records = records.filtered('id')
atts = records.env['ir.attachment'].sudo()
if not_null:
atts = atts.search([
('res_model', '=', self.model_name),
('res_field', '=', self.name),
('res_id', 'in', real_records.ids),
])
if value:
# update the existing attachments
atts.write({'datas': value})
atts_records = records.browse(atts.mapped('res_id'))
# create the missing attachments
missing = (real_records - atts_records)
if missing:
atts.create([{
'name': self.name,
'res_model': record._name,
'res_field': self.name,
'res_id': record.id,
'type': 'binary',
'datas': value,
}
for record in missing
])
else:
atts.unlink()
return records
class Image(Binary):
"""Encapsulates an image, extending :class:`Binary`.
If image size is greater than the ``max_width``/``max_height`` limit of pixels, the image will be
resized to the limit by keeping aspect ratio.
:param int max_width: the maximum width of the image (default: ``0``, no limit)
:param int max_height: the maximum height of the image (default: ``0``, no limit)
:param bool verify_resolution: whether the image resolution should be verified
to ensure it doesn't go over the maximum image resolution (default: ``True``).
See :class:`odoo.tools.image.ImageProcess` for maximum image resolution (default: ``45e6``).
.. note::
If no ``max_width``/``max_height`` is specified (or is set to 0) and ``verify_resolution`` is False,
the field content won't be verified at all and a :class:`Binary` field should be used.
"""
max_width = 0
max_height = 0
verify_resolution = True
def create(self, record_values):
new_record_values = []
for record, value in record_values:
# strange behavior when setting related image field, when `self`
# does not resize the same way as its related field
new_value = self._image_process(value)
new_record_values.append((record, new_value))
cache_value = self.convert_to_cache(value if self.related else new_value, record)
record.env.cache.update(record, self, [cache_value] * len(record))
super(Image, self).create(new_record_values)
def write(self, records, value):
try:
new_value = self._image_process(value)
except UserError:
if not any(records._ids):
# Some crap is assigned to a new record. This can happen in an
# onchange, where the client sends the "bin size" value of the
# field instead of its full value (this saves bandwidth). In
# this case, we simply don't assign the field: its value will be
# taken from the records' origin.
return
raise
super(Image, self).write(records, new_value)
cache_value = self.convert_to_cache(value if self.related else new_value, records)
records.env.cache.update(records, self, [cache_value] * len(records))
def _image_process(self, value):
return image_process(value,
size=(self.max_width, self.max_height),
verify_resolution=self.verify_resolution,
)
def _process_related(self, value):
"""Override to resize the related value before saving it on self."""
try:
return self._image_process(super()._process_related(value))
except UserError:
# Avoid the following `write` to fail if the related image was saved
# invalid, which can happen for pre-existing databases.
return False
class Selection(Field):
""" Encapsulates an exclusive choice between different values.
:param selection: specifies the possible values for this field.
It is given as either a list of pairs ``(value, label)``, or a model
method, or a method name.
:type selection: list(tuple(str,str)) or callable or str
:param selection_add: provides an extension of the selection in the case
of an overridden field. It is a list of pairs ``(value, label)`` or
singletons ``(value,)``, where singleton values must appear in the
overridden selection. The new values are inserted in an order that is
consistent with the overridden selection and this list::
selection = [('a', 'A'), ('b', 'B')]
selection_add = [('c', 'C'), ('b',)]
> result = [('a', 'A'), ('c', 'C'), ('b', 'B')]
:type selection_add: list(tuple(str,str))
:param ondelete: provides a fallback mechanism for any overridden
field with a selection_add. It is a dict that maps every option
from the selection_add to a fallback action.
This fallback action will be applied to all records whose
selection_add option maps to it.
The actions can be any of the following:
- 'set null' -- the default, all records with this option
will have their selection value set to False.
- 'cascade' -- all records with this option will be
deleted along with the option itself.
- 'set default' -- all records with this option will be
set to the default of the field definition
- <callable> -- a callable whose first and only argument will be
the set of records containing the specified Selection option,
for custom processing
The attribute ``selection`` is mandatory except in the case of
``related`` or extended fields.
"""
type = 'selection'
column_type = ('varchar', pg_varchar())
selection = None # [(value, string), ...], function or method name
validate = True # whether validating upon write
ondelete = None # {value: policy} (what to do when value is deleted)
def __init__(self, selection=Default, string=Default, **kwargs):
super(Selection, self).__init__(selection=selection, string=string, **kwargs)
def _setup_regular_base(self, model):
super(Selection, self)._setup_regular_base(model)
assert self.selection is not None, "Field %s without selection" % self
if isinstance(self.selection, list):
assert all(isinstance(v, str) for v, _ in self.selection), \
"Field %s with non-str value in selection" % self
def _setup_related_full(self, model):
super(Selection, self)._setup_related_full(model)
# selection must be computed on related field
field = self.related_field
self.selection = lambda model: field._description_selection(model.env)
def _get_attrs(self, model, name):
attrs = super(Selection, self)._get_attrs(model, name)
# arguments 'selection' and 'selection_add' are processed below
attrs.pop('selection_add', None)
return attrs
def _setup_attrs(self, model, name):
super(Selection, self)._setup_attrs(model, name)
# determine selection (applying 'selection_add' extensions)
values = None
labels = {}
for field in reversed(resolve_mro(model, name, self._can_setup_from)):
# We cannot use field.selection or field.selection_add here
# because those attributes are overridden by ``_setup_attrs``.
if 'selection' in field.args:
if self.related:
_logger.warning("%s: selection attribute will be ignored as the field is related", self)
selection = field.args['selection']
if isinstance(selection, list):
if values is not None and values != [kv[0] for kv in selection]:
_logger.warning("%s: selection=%r overrides existing selection; use selection_add instead", self, selection)
values = [kv[0] for kv in selection]
labels = dict(selection)
self.ondelete = {}
else:
values = None
labels = {}
self.selection = selection
self.ondelete = None
if 'selection_add' in field.args:
if self.related:
_logger.warning("%s: selection_add attribute will be ignored as the field is related", self)
selection_add = field.args['selection_add']
assert isinstance(selection_add, list), \
"%s: selection_add=%r must be a list" % (self, selection_add)
assert values is not None, \
"%s: selection_add=%r on non-list selection %r" % (self, selection_add, self.selection)
ondelete = field.args.get('ondelete') or {}
new_values = [kv[0] for kv in selection_add if kv[0] not in values]
for key in new_values:
ondelete.setdefault(key, 'set null')
if self.required and new_values and 'set null' in ondelete.values():
raise ValueError(
"%r: required selection fields must define an ondelete policy that "
"implements the proper cleanup of the corresponding records upon "
"module uninstallation. Please use one or more of the following "
"policies: 'set default' (if the field has a default defined), 'cascade', "
"or a single-argument callable where the argument is the recordset "
"containing the specified option." % self
)
# check ondelete values
for key, val in ondelete.items():
if callable(val) or val in ('set null', 'cascade'):
continue
if val == 'set default':
assert self.default is not None, (
"%r: ondelete policy of type 'set default' is invalid for this field "
"as it does not define a default! Either define one in the base "
"field, or change the chosen ondelete policy" % self
)
continue
raise ValueError(
"%r: ondelete policy %r for selection value %r is not a valid ondelete "
"policy, please choose one of 'set null', 'set default', 'cascade' or "
"a callable" % (self, val, key)
)
values = merge_sequences(values, [kv[0] for kv in selection_add])
labels.update(kv for kv in selection_add if len(kv) == 2)
self.ondelete.update(ondelete)
if values is not None:
self.selection = [(value, labels[value]) for value in values]
def _selection_modules(self, model):
""" Return a mapping from selection values to modules defining each value. """
if not isinstance(self.selection, list):
return {}
value_modules = defaultdict(set)
for field in reversed(resolve_mro(model, self.name, self._can_setup_from)):
module = field.args.get('_module')
if not module:
continue
if 'selection' in field.args:
value_modules.clear()
if isinstance(field.args['selection'], list):
for value, label in field.args['selection']:
value_modules[value].add(module)
if 'selection_add' in field.args:
for value_label in field.args['selection_add']:
if len(value_label) > 1:
value_modules[value_label[0]].add(module)
return value_modules
def _description_selection(self, env):
""" return the selection list (pairs (value, label)); labels are
translated according to context language
"""
selection = self.selection
if isinstance(selection, str):
return getattr(env[self.model_name], selection)()
if callable(selection):
return selection(env[self.model_name])
# translate selection labels
if env.lang:
return env['ir.translation'].get_field_selection(self.model_name, self.name)
else:
return selection
def get_values(self, env):
"""Return a list of the possible values."""
selection = self.selection
if isinstance(selection, str):
selection = getattr(env[self.model_name], selection)()
elif callable(selection):
selection = selection(env[self.model_name])
return [value for value, _ in selection]
def convert_to_column(self, value, record, values=None, validate=True):
if validate and self.validate:
value = self.convert_to_cache(value, record)
return super(Selection, self).convert_to_column(value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
if not validate:
return value or None
if value and self.column_type[0] == 'int4':
value = int(value)
if value in self.get_values(record.env):
return value
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
if not isinstance(self.selection, list):
# FIXME: this reproduces an existing buggy behavior!
return value if value else ''
for item in self._description_selection(record.env):
if item[0] == value:
return item[1]
return ''
class Reference(Selection):
""" Pseudo-relational field (no FK in database).
The field value is stored as a :class:`string <str>` following the pattern
``"res_model.res_id"`` in database.
"""
type = 'reference'
@property
def column_type(self):
return ('varchar', pg_varchar())
def convert_to_column(self, value, record, values=None, validate=True):
return Field.convert_to_column(self, value, record, values, validate)
def convert_to_cache(self, value, record, validate=True):
# cache format: str ("model,id") or None
if isinstance(value, BaseModel):
if not validate or (value._name in self.get_values(record.env) and len(value) <= 1):
return "%s,%s" % (value._name, value.id) if value else None
elif isinstance(value, str):
res_model, res_id = value.split(',')
if not validate or res_model in self.get_values(record.env):
if record.env[res_model].browse(int(res_id)).exists():
return value
else:
return None
elif not value:
return None
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_record(self, value, record):
if value:
res_model, res_id = value.split(',')
return record.env[res_model].browse(int(res_id))
return None
def convert_to_read(self, value, record, use_name_get=True):
return "%s,%s" % (value._name, value.id) if value else False
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value and value.display_name)
class _Relational(Field):
""" Abstract class for relational fields. """
relational = True
domain = [] # domain for searching values
context = {} # context for searching values
check_company = False
def __get__(self, records, owner):
# base case: do the regular access
if records is None or len(records._ids) <= 1:
return super().__get__(records, owner)
# multirecord case: use mapped
return self.mapped(records)
def _setup_regular_base(self, model):
super(_Relational, self)._setup_regular_base(model)
if self.comodel_name not in model.pool:
_logger.warning("Field %s with unknown comodel_name %r", self, self.comodel_name)
self.comodel_name = '_unknown'
def get_domain_list(self, model):
""" Return a list domain from the domain parameter. """
domain = self.domain
if callable(domain):
domain = domain(model)
return domain if isinstance(domain, list) else []
@property
def _related_domain(self):
if callable(self.domain):
# will be called with another model than self's
return lambda recs: self.domain(recs.env[self.model_name])
else:
# maybe not correct if domain is a string...
return self.domain
_related_context = property(attrgetter('context'))
_description_relation = property(attrgetter('comodel_name'))
_description_context = property(attrgetter('context'))
def _description_domain(self, env):
if self.check_company and not self.domain:
if self.company_dependent:
if self.comodel_name == "res.users":
# user needs access to current company (self.env.company)
return "[('company_ids', 'in', allowed_company_ids[0])]"
else:
return "[('company_id', 'in', [allowed_company_ids[0], False])]"
else:
# when using check_company=True on a field on 'res.company', the
# company_id comes from the id of the current record
cid = "id" if self.model_name == "res.company" else "company_id"
if self.comodel_name == "res.users":
# User allowed company ids = user.company_ids
return f"['|', (not {cid}, '=', True), ('company_ids', 'in', [{cid}])]"
else:
return f"[('company_id', 'in', [{cid}, False])]"
return self.domain(env[self.model_name]) if callable(self.domain) else self.domain
def null(self, record):
return record.env[self.comodel_name]
class Many2one(_Relational):
""" The value of such a field is a recordset of size 0 (no
record) or 1 (a single record).
:param str comodel_name: name of the target model
``Mandatory`` except for related or extended fields.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param str ondelete: what to do when the referred record is deleted;
possible values are: ``'set null'``, ``'restrict'``, ``'cascade'``
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param bool delegate: set it to ``True`` to make fields of the target model
accessible from the current model (corresponds to ``_inherits``)
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
"""
type = 'many2one'
column_type = ('int4', 'int4')
ondelete = None # what to do when value is deleted
auto_join = False # whether joins are generated upon search
delegate = False # whether self implements delegation
def __init__(self, comodel_name=Default, string=Default, **kwargs):
super(Many2one, self).__init__(comodel_name=comodel_name, string=string, **kwargs)
def _setup_attrs(self, model, name):
super(Many2one, self)._setup_attrs(model, name)
# determine self.delegate
if not self.delegate:
self.delegate = name in model._inherits.values()
def _setup_regular_base(self, model):
super()._setup_regular_base(model)
# 3 cases:
# 1) The ondelete attribute is not defined, we assign it a sensible default
# 2) The ondelete attribute is defined and its definition makes sense
# 3) The ondelete attribute is explicitly defined as 'set null' for a required m2o,
# this is considered a programming error.
if not self.ondelete:
comodel = model.env[self.comodel_name]
if model.is_transient() and not comodel.is_transient():
# Many2one relations from TransientModel Model are annoying because
# they can block deletion due to foreign keys. So unless stated
# otherwise, we default them to ondelete='cascade'.
self.ondelete = 'cascade' if self.required else 'set null'
else:
self.ondelete = 'restrict' if self.required else 'set null'
if self.ondelete == 'set null' and self.required:
raise ValueError(
"The m2o field %s of model %s is required but declares its ondelete policy "
"as being 'set null'. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name)
)
if self.ondelete == 'restrict' and self.comodel_name in IR_MODELS:
raise ValueError(
f"Field {self.name} of model {model._name} is defined as ondelete='restrict' "
f"while having {self.comodel_name} as comodel, the 'restrict' mode is not "
f"supported for this type of field as comodel."
)
def update_db(self, model, columns):
comodel = model.env[self.comodel_name]
if not model.is_transient() and comodel.is_transient():
raise ValueError('Many2one %s from Model to TransientModel is forbidden' % self)
return super(Many2one, self).update_db(model, columns)
def update_db_column(self, model, column):
super(Many2one, self).update_db_column(model, column)
model.pool.post_init(self.update_db_foreign_key, model, column)
def update_db_foreign_key(self, model, column):
comodel = model.env[self.comodel_name]
# foreign keys do not work on views, and users can define custom models on sql views.
if not model._is_an_ordinary_table() or not comodel._is_an_ordinary_table():
return
# ir_actions is inherited, so foreign key doesn't work on it
if not comodel._auto or comodel._table == 'ir_actions':
return
# create/update the foreign key, and reflect it in 'ir.model.constraint'
model.pool.add_foreign_key(
model._table, self.name, comodel._table, 'id', self.ondelete or 'set null',
model, self._module
)
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``. """
cache = records.env.cache
for record in records:
cache.set(record, self, self.convert_to_cache(value, record, validate=False))
def convert_to_column(self, value, record, values=None, validate=True):
return value or None
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if type(value) in IdType:
id_ = value
elif isinstance(value, BaseModel):
if validate and (value._name != self.comodel_name or len(value) > 1):
raise ValueError("Wrong value for %s: %r" % (self, value))
id_ = value._ids[0] if value._ids else None
elif isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
id_ = value[0] if value else None
elif isinstance(value, dict):
# return a new record (with the given field 'id' as origin)
comodel = record.env[self.comodel_name]
origin = comodel.browse(value.get('id'))
id_ = comodel.new(value, origin=origin).id
else:
id_ = None
if self.delegate and record and not any(record._ids):
# if all records are new, then so is the parent
id_ = id_ and NewId(id_)
return id_
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
ids = () if value is None else (value,)
prefetch_ids = IterableGenerator(prefetch_many2one_ids, record, self)
return record.pool[self.comodel_name]._browse(record.env, ids, prefetch_ids)
def convert_to_record_multi(self, values, records):
# return the ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_many2one_ids, records, self)
ids = tuple(unique(id_ for id_ in values if id_ is not None))
return records.pool[self.comodel_name]._browse(records.env, ids, prefetch_ids)
def convert_to_read(self, value, record, use_name_get=True):
if use_name_get and value:
# evaluate name_get() as superuser, because the visibility of a
# many2one field value (id and name) depends on the current record's
# access rights, and not the value's access rights.
try:
# performance: value.sudo() prefetches the same records as value
return (value.id, value.sudo().display_name)
except MissingError:
# Should not happen, unless the foreign key is missing.
return False
else:
return value.id
def convert_to_write(self, value, record):
if type(value) in IdType:
return value
if not value:
return False
if isinstance(value, BaseModel) and value._name == self.comodel_name:
return value.id
if isinstance(value, tuple):
# value is either a pair (id, name), or a tuple of ids
return value[0] if value else False
if isinstance(value, dict):
return record.env[self.comodel_name].new(value).id
raise ValueError("Wrong value for %s: %r" % (self, value))
def convert_to_export(self, value, record):
return value.display_name if value else ''
def convert_to_display_name(self, value, record):
return ustr(value.display_name)
def convert_to_onchange(self, value, record, names):
if not value.id:
return False
return super(Many2one, self).convert_to_onchange(value, record, names)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
# discard the records that are not modified
cache = records.env.cache
cache_value = self.convert_to_cache(value, records)
records = cache.get_records_different_from(records, self, cache_value)
if not records:
return records
# remove records from the cache of one2many fields of old corecords
self._remove_inverses(records, cache_value)
# update the cache of self
cache.update(records, self, [cache_value] * len(records))
# update towrite
if self.store:
towrite = records.env.all.towrite[self.model_name]
for record in records.filtered('id'):
# cache_value is already in database format
towrite[record.id][self.name] = cache_value
# update the cache of one2many fields of new corecord
self._update_inverses(records, cache_value)
return records
def _remove_inverses(self, records, value):
""" Remove `records` from the cached values of the inverse fields of `self`. """
cache = records.env.cache
record_ids = set(records._ids)
# align(id) returns a NewId if records are new, a real id otherwise
align = (lambda id_: id_) if all(record_ids) else (lambda id_: id_ and NewId(id_))
for invf in records._field_inverses[self]:
corecords = records.env[self.comodel_name].browse(
align(id_) for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if value is None:
return
cache = records.env.cache
corecord = self.convert_to_record(value, records)
for invf in records._field_inverses[self]:
valid_records = records.filtered_domain(invf.get_domain_list(corecord))
if not valid_records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + valid_records._ids))
cache.set(corecord, invf, ids1)
class Many2oneReference(Integer):
""" Pseudo-relational field (no FK in database).
The field value is stored as an :class:`integer <int>` id in database.
Contrary to :class:`Reference` fields, the model has to be specified
in a :class:`Char` field, whose name has to be specified in the
`model_field` attribute for the current :class:`Many2oneReference` field.
:param str model_field: name of the :class:`Char` where the model name is stored.
"""
type = 'many2one_reference'
model_field = None
_related_model_field = property(attrgetter('model_field'))
def convert_to_cache(self, value, record, validate=True):
# cache format: id or None
if isinstance(value, BaseModel):
value = value._ids[0] if value._ids else None
return super().convert_to_cache(value, record, validate)
def _remove_inverses(self, records, value):
# TODO: unused
# remove records from the cache of one2many fields of old corecords
cache = records.env.cache
record_ids = set(records._ids)
model_ids = self._record_ids_per_res_model(records)
for invf in records._field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecords = records.env[invf.model_name].browse(
id_ for id_ in cache.get_values(records, self)
)
for corecord in corecords:
ids0 = cache.get(corecord, invf, None)
if ids0 is not None:
ids1 = tuple(id_ for id_ in ids0 if id_ not in record_ids)
cache.set(corecord, invf, ids1)
def _update_inverses(self, records, value):
""" Add `records` to the cached values of the inverse fields of `self`. """
if not value:
return
cache = records.env.cache
model_ids = self._record_ids_per_res_model(records)
for invf in records._field_inverses[self]:
records = records.browse(model_ids[invf.model_name])
if not records:
continue
corecord = records.env[invf.model_name].browse(value)
records = records.filtered_domain(invf.get_domain_list(corecord))
if not records:
continue
ids0 = cache.get(corecord, invf, None)
# if the value for the corecord is not in cache, but this is a new
# record, assign it anyway, as you won't be able to fetch it from
# database (see `test_sale_order`)
if ids0 is not None or not corecord.id:
ids1 = tuple(unique((ids0 or ()) + records._ids))
cache.set(corecord, invf, ids1)
def _record_ids_per_res_model(self, records):
model_ids = defaultdict(set)
for record in records:
model = record[self.model_field]
if not model and record._fields[self.model_field].compute:
# fallback when the model field is computed :-/
record._fields[self.model_field].compute_value(record)
model = record[self.model_field]
if not model:
continue
model_ids[model].add(record.id)
return model_ids
class _RelationalMulti(_Relational):
""" Abstract class for relational fields *2many. """
# Important: the cache contains the ids of all the records in the relation,
# including inactive records. Inactive records are filtered out by
# convert_to_record(), depending on the context.
def _update(self, records, value):
""" Update the cached value of ``self`` for ``records`` with ``value``,
and return whether everything is in cache.
"""
if not isinstance(records, BaseModel):
# the inverse of self is a non-relational field; `value` is a
# corecord that refers to `records` by an integer field
model = value.env[self.model_name]
domain = self.domain(model) if callable(self.domain) else self.domain
if not value.filtered_domain(domain):
return
records = model.browse(records)
result = True
if value:
cache = records.env.cache
for record in records:
if cache.contains(record, self):
val = self.convert_to_cache(record[self.name] | value, record, validate=False)
cache.set(record, self, val)
else:
result = False
records.modified([self.name])
return result
def convert_to_cache(self, value, record, validate=True):
# cache format: tuple(ids)
if isinstance(value, BaseModel):
if validate and value._name != self.comodel_name:
raise ValueError("Wrong value for %s: %s" % (self, value))
ids = value._ids
if record and not record.id:
# x2many field value of new record is new records
ids = tuple(it and NewId(it) for it in ids)
return ids
elif isinstance(value, (list, tuple)):
# value is a list/tuple of commands, dicts or record ids
comodel = record.env[self.comodel_name]
# if record is new, the field's value is new records
if record and not record.id:
browse = lambda it: comodel.browse([it and NewId(it)])
else:
browse = comodel.browse
# determine the value ids
ids = OrderedSet(record[self.name]._ids if validate else ())
# modify ids with the commands
for command in value:
if isinstance(command, (tuple, list)):
if command[0] == 0:
ids.add(comodel.new(command[2], ref=command[1]).id)
elif command[0] == 1:
line = browse(command[1])
if validate:
line.update(command[2])
else:
line._update_cache(command[2], validate=False)
ids.add(line.id)
elif command[0] in (2, 3):
ids.discard(browse(command[1]).id)
elif command[0] == 4:
ids.add(browse(command[1]).id)
elif command[0] == 5:
ids.clear()
elif command[0] == 6:
ids = OrderedSet(browse(it).id for it in command[2])
elif isinstance(command, dict):
ids.add(comodel.new(command).id)
else:
ids.add(browse(command).id)
# return result as a tuple
return tuple(ids)
elif not value:
return ()
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_record(self, value, record):
# use registry to avoid creating a recordset for the model
prefetch_ids = IterableGenerator(prefetch_x2many_ids, record, self)
Comodel = record.pool[self.comodel_name]
corecords = Comodel._browse(record.env, value, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', record.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_record_multi(self, values, records):
# return the list of ids as a recordset without duplicates
prefetch_ids = IterableGenerator(prefetch_x2many_ids, records, self)
Comodel = records.pool[self.comodel_name]
ids = tuple(unique(id_ for ids in values for id_ in ids))
corecords = Comodel._browse(records.env, ids, prefetch_ids)
if (
Comodel._active_name
and self.context.get('active_test', records.env.context.get('active_test', True))
):
corecords = corecords.filtered(Comodel._active_name).with_prefetch(prefetch_ids)
return corecords
def convert_to_read(self, value, record, use_name_get=True):
return value.ids
def convert_to_write(self, value, record):
if isinstance(value, tuple):
# a tuple of ids, this is the cache format
value = record.env[self.comodel_name].browse(value)
if isinstance(value, BaseModel) and value._name == self.comodel_name:
def get_origin(val):
return val._origin if isinstance(val, BaseModel) else val
# make result with new and existing records
inv_names = {field.name for field in record._field_inverses[self]}
result = [(6, 0, [])]
for record in value:
origin = record._origin
if not origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names
})
result.append((0, 0, values))
else:
result[0][2].append(origin.id)
if record != origin:
values = record._convert_to_write({
name: record[name]
for name in record._cache
if name not in inv_names and get_origin(record[name]) != origin[name]
})
if values:
result.append((1, origin.id, values))
return result
if value is False or value is None:
return [(5,)]
if isinstance(value, list):
return value
raise ValueError("Wrong value for %s: %s" % (self, value))
def convert_to_export(self, value, record):
return ','.join(name for id, name in value.name_get()) if value else ''
def convert_to_display_name(self, value, record):
raise NotImplementedError()
def _setup_regular_full(self, model):
super(_RelationalMulti, self)._setup_regular_full(model)
if not self.compute and isinstance(self.domain, list):
self.depends = tuple(unique(itertools.chain(self.depends, (
self.name + '.' + arg[0]
for arg in self.domain
if isinstance(arg, (tuple, list)) and isinstance(arg[0], str)
))))
def create(self, record_values):
""" Write the value of ``self`` on the given records, which have just
been created.
:param record_values: a list of pairs ``(record, value)``, where
``value`` is in the format of method :meth:`BaseModel.write`
"""
self.write_batch(record_values, True)
def write(self, records, value):
# discard recomputation of self on records
records.env.remove_to_compute(self, records)
return self.write_batch([(records, value)])
def write_batch(self, records_commands_list, create=False):
if not records_commands_list:
return False
for idx, (recs, value) in enumerate(records_commands_list):
if isinstance(value, tuple):
value = [(6, 0, value)]
elif isinstance(value, BaseModel) and value._name == self.comodel_name:
value = [(6, 0, value._ids)]
elif value is False or value is None:
value = [(5,)]
elif isinstance(value, list) and value and not isinstance(value[0], (tuple, list)):
value = [(6, 0, tuple(value))]
if not isinstance(value, list):
raise ValueError("Wrong value for %s: %s" % (self, value))
records_commands_list[idx] = (recs, value)
record_ids = {rid for recs, cs in records_commands_list for rid in recs._ids}
if all(record_ids):
return self.write_real(records_commands_list, create)
else:
assert not any(record_ids)
return self.write_new(records_commands_list)
class One2many(_RelationalMulti):
"""One2many field; the value of such a field is the recordset of all the
records in ``comodel_name`` such that the field ``inverse_name`` is equal to
the current record.
:param str comodel_name: name of the target model
:param str inverse_name: name of the inverse ``Many2one`` field in
``comodel_name``
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool auto_join: whether JOINs are generated upon search through that
field (default: ``False``)
:param int limit: optional limit to use upon read
The attributes ``comodel_name`` and ``inverse_name`` are mandatory except in
the case of related fields or field extensions.
"""
type = 'one2many'
inverse_name = None # name of the inverse field
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
copy = False # o2m are not copied by default
def __init__(self, comodel_name=Default, inverse_name=Default, string=Default, **kwargs):
super(One2many, self).__init__(
comodel_name=comodel_name,
inverse_name=inverse_name,
string=string,
**kwargs
)
def _setup_regular_full(self, model):
super(One2many, self)._setup_regular_full(model)
if self.inverse_name:
# link self to its inverse field and vice-versa
comodel = model.env[self.comodel_name]
invf = comodel._fields[self.inverse_name]
if isinstance(invf, (Many2one, Many2oneReference)):
# setting one2many fields only invalidates many2one inverses;
# integer inverses (res_model/res_id pairs) are not supported
model._field_inverses.add(self, invf)
comodel._field_inverses.add(invf, self)
_description_relation_field = property(attrgetter('inverse_name'))
def update_db(self, model, columns):
if self.comodel_name in model.env:
comodel = model.env[self.comodel_name]
if self.inverse_name not in comodel._fields:
raise UserError(_("No inverse field %r found for %r") % (self.inverse_name, self.comodel_name))
def get_domain_list(self, records):
comodel = records.env.registry[self.comodel_name]
inverse_field = comodel._fields[self.inverse_name]
domain = super(One2many, self).get_domain_list(records)
if inverse_field.type == 'many2one_reference':
domain = domain + [(inverse_field.model_field, '=', records._name)]
return domain
def __get__(self, records, owner):
if records is not None and self.inverse_name is not None:
# force the computation of the inverse field to ensure that the
# cache value of self is consistent
inverse_field = records.pool[self.comodel_name]._fields[self.inverse_name]
if inverse_field.compute:
records.env[self.comodel_name].recompute([self.inverse_name])
return super().__get__(records, owner)
def read(self, records):
# retrieve the lines in the comodel
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
inverse = self.inverse_name
inverse_field = comodel._fields[inverse]
get_id = (lambda rec: rec.id) if inverse_field.type == 'many2one' else int
domain = self.get_domain_list(records) + [(inverse, 'in', records.ids)]
lines = comodel.search(domain, limit=self.limit)
# group lines by inverse field (without prefetching other fields)
group = defaultdict(list)
for line in lines.with_context(prefetch_fields=False):
# line[inverse] may be a record or an integer
group[get_id(line[inverse])].append(line.id)
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
""" Update real records. """
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {rid for recs, cs in records_commands_list for rid in recs.ids}
records = records_commands_list[0][0].browse(ids)
if self.store:
inverse = self.inverse_name
to_create = [] # line vals to create
to_delete = [] # line ids to delete
to_inverse = {}
allow_full_delete = not create
def unlink(lines):
if getattr(comodel._fields[inverse], 'ondelete', False) == 'cascade':
to_delete.extend(lines._ids)
else:
lines[inverse] = False
def flush():
if to_delete:
# unlink() will remove the lines from the cache
comodel.browse(to_delete).unlink()
to_delete.clear()
if to_create:
# create() will add the new lines to the cache of records
comodel.create(to_create)
to_create.clear()
if to_inverse:
for record, inverse_ids in to_inverse.items():
lines = comodel.browse(inverse_ids)
lines = lines.filtered(lambda line: int(line[inverse]) != record.id)
lines[inverse] = record
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == 0:
for record in recs:
to_create.append(dict(command[2], **{inverse: record.id}))
allow_full_delete = False
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
to_delete.append(command[1])
elif command[0] == 3:
unlink(comodel.browse(command[1]))
elif command[0] == 4:
to_inverse.setdefault(recs[-1], set()).add(command[1])
allow_full_delete = False
                    elif command[0] in (5, 6):
# do not try to delete anything in creation mode if nothing has been created before
line_ids = command[2] if command[0] == 6 else []
if not allow_full_delete and not line_ids:
continue
flush()
# assign the given lines to the last record only
lines = comodel.browse(line_ids)
domain = self.get_domain_list(model) + \
[(inverse, 'in', recs.ids), ('id', 'not in', lines.ids)]
unlink(comodel.search(domain))
lines[inverse] = recs[-1]
flush()
else:
cache = records.env.cache
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in (commands or ()):
if command[0] == 0:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
unlink(comodel.browse(command[1]))
elif command[0] == 3:
unlink(comodel.browse(command[1]))
elif command[0] == 4:
link(recs[-1], comodel.browse(command[1]))
elif command[0] in (5, 6):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == 6 else [])
cache.set(recs[-1], self, lines._ids)
return records
def write_new(self, records_commands_list):
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
cache = model.env.cache
comodel = model.env[self.comodel_name].with_context(**self.context)
ids = {record.id for records, _ in records_commands_list for record in records}
records = model.browse(ids)
def browse(ids):
return comodel.browse([id_ and NewId(id_) for id_ in ids])
# make sure self is in cache
records[self.name]
if self.store:
inverse = self.inverse_name
# make sure self's inverse is in cache
inverse_field = comodel._fields[inverse]
for record in records:
cache.update(record[self.name], inverse_field, itertools.repeat(record.id))
for recs, commands in records_commands_list:
for command in commands:
if command[0] == 0:
for record in recs:
line = comodel.new(command[2], ref=command[1])
line[inverse] = record
elif command[0] == 1:
browse([command[1]]).update(command[2])
elif command[0] == 2:
browse([command[1]])[inverse] = False
elif command[0] == 3:
browse([command[1]])[inverse] = False
elif command[0] == 4:
browse([command[1]])[inverse] = recs[-1]
elif command[0] == 5:
cache.update(recs, self, itertools.repeat(()))
elif command[0] == 6:
# assign the given lines to the last record only
cache.update(recs, self, itertools.repeat(()))
last, lines = recs[-1], browse(command[2])
cache.set(last, self, lines._ids)
cache.update(lines, inverse_field, itertools.repeat(last.id))
else:
def link(record, lines):
ids = record[self.name]._ids
cache.set(record, self, tuple(unique(ids + lines._ids)))
def unlink(lines):
for record in records:
cache.set(record, self, (record[self.name] - lines)._ids)
for recs, commands in records_commands_list:
for command in commands:
if command[0] == 0:
for record in recs:
link(record, comodel.new(command[2], ref=command[1]))
elif command[0] == 1:
browse([command[1]]).update(command[2])
elif command[0] == 2:
unlink(browse([command[1]]))
elif command[0] == 3:
unlink(browse([command[1]]))
elif command[0] == 4:
link(recs[-1], browse([command[1]]))
elif command[0] in (5, 6):
# assign the given lines to the last record only
cache.update(recs, self, [()] * len(recs))
lines = comodel.browse(command[2] if command[0] == 6 else [])
cache.set(recs[-1], self, lines._ids)
return records
class Many2many(_RelationalMulti):
""" Many2many field; the value of such a field is the recordset.
:param comodel_name: name of the target model (string)
mandatory except in the case of related or extended fields
:param str relation: optional name of the table that stores the relation in
the database
:param str column1: optional name of the column referring to "these" records
in the table ``relation``
:param str column2: optional name of the column referring to "those" records
in the table ``relation``
The attributes ``relation``, ``column1`` and ``column2`` are optional.
If not given, names are automatically generated from model names,
provided ``model_name`` and ``comodel_name`` are different!
    Note that having several fields with implicit relation parameters on a
    given model with the same comodel is not accepted by the ORM, since
    those fields would use the same table. The ORM prevents two many2many
    fields from using the same relation parameters, except if
- both fields use the same model, comodel, and relation parameters are
explicit; or
- at least one field belongs to a model with ``_auto = False``.
:param domain: an optional domain to set on candidate values on the
client side (domain or string)
:param dict context: an optional context to use on the client side when
handling that field
:param bool check_company: Mark the field to be verified in
:meth:`~odoo.models.Model._check_company`. Add a default company
domain depending on the field attributes.
:param int limit: optional limit to use upon read
"""
type = 'many2many'
_explicit = True # whether schema is explicitly given
relation = None # name of table
column1 = None # column of table referring to model
column2 = None # column of table referring to comodel
auto_join = False # whether joins are generated upon search
limit = None # optional limit to use upon read
ondelete = None # optional ondelete for the column2 fkey
def __init__(self, comodel_name=Default, relation=Default, column1=Default,
column2=Default, string=Default, **kwargs):
super(Many2many, self).__init__(
comodel_name=comodel_name,
relation=relation,
column1=column1,
column2=column2,
string=string,
**kwargs
)
def _setup_regular_base(self, model):
super(Many2many, self)._setup_regular_base(model)
# 3 cases:
# 1) The ondelete attribute is not defined, we assign it a sensible default
# 2) The ondelete attribute is defined and its definition makes sense
# 3) The ondelete attribute is explicitly defined as 'set null' for a m2m,
# this is considered a programming error.
self.ondelete = self.ondelete or 'cascade'
if self.ondelete == 'set null':
raise ValueError(
"The m2m field %s of model %s declares its ondelete policy "
"as being 'set null'. Only 'restrict' and 'cascade' make sense."
% (self.name, model._name)
)
if self.store:
if not (self.relation and self.column1 and self.column2):
self._explicit = False
# table name is based on the stable alphabetical order of tables
comodel = model.env[self.comodel_name]
if not self.relation:
tables = sorted([model._table, comodel._table])
assert tables[0] != tables[1], \
"%s: Implicit/canonical naming of many2many relationship " \
"table is not possible when source and destination models " \
"are the same" % self
self.relation = '%s_%s_rel' % tuple(tables)
if not self.column1:
self.column1 = '%s_id' % model._table
if not self.column2:
self.column2 = '%s_id' % comodel._table
# check validity of table name
check_pg_name(self.relation)
else:
self.relation = self.column1 = self.column2 = None
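    # Worked example added for clarity (table names are hypothetical): with
    # model._table == 'example_doc' and comodel._table == 'example_tag', the
    # implicit schema above becomes relation 'example_doc_example_tag_rel',
    # column1 'example_doc_id' and column2 'example_tag_id'.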
def _setup_regular_full(self, model):
super(Many2many, self)._setup_regular_full(model)
if self.relation:
m2m = model.pool._m2m
# check whether other fields use the same schema
fields = m2m[(self.relation, self.column1, self.column2)]
for field in fields:
if ( # same model: relation parameters must be explicit
self.model_name == field.model_name and
self.comodel_name == field.comodel_name and
self._explicit and field._explicit
) or ( # different models: one model must be _auto=False
self.model_name != field.model_name and
not (model._auto and model.env[field.model_name]._auto)
):
continue
msg = "Many2many fields %s and %s use the same table and columns"
raise TypeError(msg % (self, field))
fields.append(self)
# retrieve inverse fields, and link them in _field_inverses
for field in m2m[(self.relation, self.column2, self.column1)]:
model._field_inverses.add(self, field)
model.env[field.model_name]._field_inverses.add(field, self)
def update_db(self, model, columns):
cr = model._cr
# Do not reflect relations for custom fields, as they do not belong to a
# module. They are automatically removed when dropping the corresponding
# 'ir.model.field'.
if not self.manual:
model.pool.post_init(model.env['ir.model.relation']._reflect_relation,
model, self.relation, self._module)
comodel = model.env[self.comodel_name]
if not sql.table_exists(cr, self.relation):
query = """
CREATE TABLE "{rel}" ("{id1}" INTEGER NOT NULL,
"{id2}" INTEGER NOT NULL,
PRIMARY KEY("{id1}","{id2}"));
COMMENT ON TABLE "{rel}" IS %s;
CREATE INDEX ON "{rel}" ("{id2}","{id1}");
""".format(rel=self.relation, id1=self.column1, id2=self.column2)
cr.execute(query, ['RELATION BETWEEN %s AND %s' % (model._table, comodel._table)])
_schema.debug("Create table %r: m2m relation between %r and %r", self.relation, model._table, comodel._table)
model.pool.post_init(self.update_db_foreign_keys, model)
def update_db_foreign_keys(self, model):
""" Add the foreign keys corresponding to the field's relation table. """
comodel = model.env[self.comodel_name]
if model._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column1, model._table, 'id', 'cascade',
model, self._module, force=False,
)
if comodel._is_an_ordinary_table():
model.pool.add_foreign_key(
self.relation, self.column2, comodel._table, 'id', self.ondelete,
model, self._module,
)
def read(self, records):
context = {'active_test': False}
context.update(self.context)
comodel = records.env[self.comodel_name].with_context(**context)
domain = self.get_domain_list(records)
comodel._flush_search(domain)
wquery = comodel._where_calc(domain)
comodel._apply_ir_rules(wquery, 'read')
order_by = comodel._generate_order_by(None, wquery)
from_c, where_c, where_params = wquery.get_sql()
query = """ SELECT {rel}.{id1}, {rel}.{id2} FROM {rel}, {from_c}
WHERE {where_c} AND {rel}.{id1} IN %s AND {rel}.{id2} = {tbl}.id
{order_by} {limit} OFFSET {offset}
""".format(rel=self.relation, id1=self.column1, id2=self.column2,
tbl=comodel._table, from_c=from_c, where_c=where_c or '1=1',
limit=(' LIMIT %d' % self.limit) if self.limit else '',
offset=0, order_by=order_by)
where_params.append(tuple(records.ids))
# retrieve lines and group them by record
group = defaultdict(list)
records._cr.execute(query, where_params)
for row in records._cr.fetchall():
group[row[0]].append(row[1])
# store result in cache
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(group[record.id]))
def write_real(self, records_commands_list, create=False):
# records_commands_list = [(records, commands), ...]
if not records_commands_list:
return
comodel = records_commands_list[0][0].env[self.comodel_name].with_context(**self.context)
cr = records_commands_list[0][0].env.cr
# determine old and new relation {x: ys}
set = OrderedSet
ids = {rid for recs, cs in records_commands_list for rid in recs.ids}
records = records_commands_list[0][0].browse(ids)
if self.store:
# Using `record[self.name]` generates 2 SQL queries when the value
# is not in cache: one that actually checks access rules for
# records, and the other one fetching the actual data. We use
# `self.read` instead to shortcut the first query.
missing_ids = list(records.env.cache.get_missing_ids(records, self))
if missing_ids:
self.read(records.browse(missing_ids))
        old_relation = {record.id: set(record[self.name]._ids) for record in records}
        new_relation = {x: set(ys) for x, ys in old_relation.items()}
# operations on new relation
def relation_add(xs, y):
for x in xs:
new_relation[x].add(y)
def relation_remove(xs, y):
for x in xs:
new_relation[x].discard(y)
def relation_set(xs, ys):
for x in xs:
new_relation[x] = set(ys)
def relation_delete(ys):
# the pairs (x, y) have been cascade-deleted from relation
for ys1 in old_relation.values():
ys1 -= ys
for ys1 in new_relation.values():
ys1 -= ys
for recs, commands in records_commands_list:
to_create = [] # line vals to create
to_delete = [] # line ids to delete
for command in (commands or ()):
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == 0:
to_create.append((recs._ids, command[2]))
elif command[0] == 1:
comodel.browse(command[1]).write(command[2])
elif command[0] == 2:
to_delete.append(command[1])
elif command[0] == 3:
relation_remove(recs._ids, command[1])
elif command[0] == 4:
relation_add(recs._ids, command[1])
elif command[0] in (5, 6):
# new lines must no longer be linked to records
to_create = [(set(ids) - set(recs._ids), vals) for (ids, vals) in to_create]
relation_set(recs._ids, command[2] if command[0] == 6 else ())
if to_create:
# create lines in batch, and link them
lines = comodel.create([vals for ids, vals in to_create])
for line, (ids, vals) in zip(lines, to_create):
relation_add(ids, line.id)
if to_delete:
# delete lines in batch
comodel.browse(to_delete).unlink()
relation_delete(to_delete)
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
if self.store:
query = "INSERT INTO {} ({}, {}) VALUES {} ON CONFLICT DO NOTHING".format(
self.relation, self.column1, self.column2, ", ".join(["%s"] * len(pairs)),
)
cr.execute(query, pairs)
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
if self.store:
# express pairs as the union of cartesian products:
# pairs = [(1, 11), (1, 12), (1, 13), (2, 11), (2, 12), (2, 14)]
# -> y_to_xs = {11: {1, 2}, 12: {1, 2}, 13: {1}, 14: {2}}
# -> xs_to_ys = {{1, 2}: {11, 12}, {2}: {14}, {1}: {13}}
xs_to_ys = defaultdict(set)
for y, xs in y_to_xs.items():
xs_to_ys[frozenset(xs)].add(y)
# delete the rows where (id1 IN xs AND id2 IN ys) OR ...
COND = "{} IN %s AND {} IN %s".format(self.column1, self.column2)
query = "DELETE FROM {} WHERE {}".format(
self.relation, " OR ".join([COND] * len(xs_to_ys)),
)
params = [arg for xs, ys in xs_to_ys.items() for arg in [tuple(xs), tuple(ys)]]
cr.execute(query, params)
# update the cache of inverse fields
for invf in records._field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse(y)
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
def write_new(self, records_commands_list):
""" Update self on new records. """
if not records_commands_list:
return
model = records_commands_list[0][0].browse()
comodel = model.env[self.comodel_name].with_context(**self.context)
new = lambda id_: id_ and NewId(id_)
# determine old and new relation {x: ys}
set = OrderedSet
old_relation = {record.id: set(record[self.name]._ids) for records, _ in records_commands_list for record in records}
new_relation = {x: set(ys) for x, ys in old_relation.items()}
ids = set(old_relation.keys())
records = model.browse(ids)
for recs, commands in records_commands_list:
for command in commands:
if not isinstance(command, (list, tuple)) or not command:
continue
if command[0] == 0:
line_id = comodel.new(command[2], ref=command[1]).id
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] == 1:
line_id = new(command[1])
comodel.browse([line_id]).update(command[2])
elif command[0] == 2:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == 3:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.discard(line_id)
elif command[0] == 4:
line_id = new(command[1])
for line_ids in new_relation.values():
line_ids.add(line_id)
elif command[0] in (5, 6):
# new lines must no longer be linked to records
line_ids = command[2] if command[0] == 6 else ()
line_ids = set(new(line_id) for line_id in line_ids)
for id_ in recs._ids:
new_relation[id_] = set(line_ids)
if new_relation == old_relation:
return records.browse()
# update the cache of self
cache = records.env.cache
for record in records:
cache.set(record, self, tuple(new_relation[record.id]))
# process pairs to add (beware of duplicates)
pairs = [(x, y) for x, ys in new_relation.items() for y in ys - old_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
domain = invf.get_domain_list(comodel)
valid_ids = set(records.filtered_domain(domain)._ids)
if not valid_ids:
continue
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(set(ids0) | (xs & valid_ids))
cache.set(corecord, invf, ids1)
except KeyError:
pass
# process pairs to remove
pairs = [(x, y) for x, ys in old_relation.items() for y in ys - new_relation[x]]
if pairs:
# update the cache of inverse fields
y_to_xs = defaultdict(set)
for x, y in pairs:
y_to_xs[y].add(x)
for invf in records._field_inverses[self]:
for y, xs in y_to_xs.items():
corecord = comodel.browse([y])
try:
ids0 = cache.get(corecord, invf)
ids1 = tuple(id_ for id_ in ids0 if id_ not in xs)
cache.set(corecord, invf, ids1)
except KeyError:
pass
return records.filtered(
lambda record: new_relation[record.id] != old_relation[record.id]
)
class Id(Field):
""" Special case for field 'id'. """
type = 'integer'
column_type = ('int4', 'int4')
string = 'ID'
store = True
readonly = True
prefetch = False
def update_db(self, model, columns):
pass # this column is created with the table
def __get__(self, record, owner):
if record is None:
return self # the field is accessed through the class owner
# the code below is written to make record.id as quick as possible
ids = record._ids
size = len(ids)
if size == 0:
return False
elif size == 1:
return ids[0]
raise ValueError("Expected singleton: %s" % record)
def __set__(self, record, value):
raise TypeError("field 'id' cannot be assigned")
def prefetch_many2one_ids(record, field):
""" Return an iterator over the ids of the cached values of a many2one
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids = record.env.cache.get_values(records, field)
return unique(id_ for id_ in ids if id_ is not None)
def prefetch_x2many_ids(record, field):
""" Return an iterator over the ids of the cached values of an x2many
field for the prefetch set of a record.
"""
records = record.browse(record._prefetch_ids)
ids_list = record.env.cache.get_values(records, field)
return unique(id_ for ids in ids_list for id_ in ids)
def apply_required(model, field_name):
""" Set a NOT NULL constraint on the given field, if necessary. """
# At the time this function is called, the model's _fields may have been reset, although
# the model's class is still the same. Retrieve the field to see whether the NOT NULL
# constraint still applies
field = model._fields[field_name]
if field.store and field.required:
sql.set_not_null(model.env.cr, model._table, field_name)
# imported here to avoid dependency cycle issues
from .exceptions import AccessError, MissingError, UserError
from .models import check_pg_name, BaseModel, NewId, IdType, expand_ids, PREFETCH_MAX
| agpl-3.0 | 7,060,470,593,801,443,000 | 41.981563 | 132 | 0.57512 | false | 4.373913 | false | false | false |
kansanmuisti/kamu | parliament/models/funding.py | 1 | 1287 | from django.db import models
from django.db.models import Q
from django.utils.translation import ugettext_lazy as _
from parliament.models.member import Member
from parliament.models.session import Term
class FundingSource(models.Model):
TYPES = (
('co', _('Corporation')),
('ind', _('Individual')),
('party', _('Party')),
)
name = models.CharField(max_length=120, null=True, blank=True)
class Meta:
app_label = 'parliament'
class Funding(models.Model):
TYPES = (
('own', _('Own funds')),
('co', _('Corporation')),
('ind', _('Individual')),
('loan', _('Loan')),
('u_ind', _('Undefined individuals')),
('u_com', _('Undefined communities')),
('party', _('Party')),
('oth', _('Other')),
)
type = models.CharField(max_length=6, choices=TYPES)
member = models.ForeignKey(Member, on_delete=models.CASCADE, db_index=True)
term = models.ForeignKey(Term, on_delete=models.CASCADE)
source = models.ForeignKey(FundingSource, on_delete=models.CASCADE, null=True, blank=True)
amount = models.DecimalField(max_digits=10, decimal_places=2)
class Meta:
app_label = 'parliament'
unique_together = (('member', 'term', 'type', 'source'),)
| agpl-3.0 | -1,693,515,536,714,444,500 | 31.175 | 94 | 0.608392 | false | 3.677143 | false | false | false |
dc3-plaso/dfvfs | dfvfs/path/fvde_path_spec.py | 1 | 2024 | # -*- coding: utf-8 -*-
"""The FileVault Drive Encryption (FVDE) path specification implementation."""
from dfvfs.lib import definitions
from dfvfs.path import factory
from dfvfs.path import path_spec
class FVDEPathSpec(path_spec.PathSpec):
"""Class that implements the FVDE path specification.
Attributes:
encrypted_root_plist (str): path to the EncryptedRoot.plist.wipekey file.
password (str): password.
recovery_password (str): recovery password.
"""
TYPE_INDICATOR = definitions.TYPE_INDICATOR_FVDE
def __init__(
self, encrypted_root_plist=None, password=None, parent=None,
recovery_password=None, **kwargs):
"""Initializes the path specification.
Note that the FVDE path specification must have a parent.
Args:
      encrypted_root_plist (Optional[str]): path to the
EncryptedRoot.plist.wipekey file.
password (Optional[str]): password.
parent (Optional[PathSpec]): parent path specification.
recovery_password (Optional[str]): recovery password.
Raises:
ValueError: when parent is not set.
"""
if not parent:
raise ValueError(u'Missing parent value.')
super(FVDEPathSpec, self).__init__(parent=parent, **kwargs)
self.encrypted_root_plist = encrypted_root_plist
self.password = password
self.recovery_password = recovery_password
@property
def comparable(self):
"""str: comparable representation of the path specification."""
string_parts = []
if self.encrypted_root_plist:
string_parts.append(u'encrypted_root_plist: {0:s}'.format(
self.encrypted_root_plist))
if self.password:
string_parts.append(u'password: {0:s}'.format(self.password))
if self.recovery_password:
string_parts.append(u'recovery_password: {0:s}'.format(
self.recovery_password))
return self._GetComparable(sub_comparable_string=u', '.join(string_parts))
# Register the path specification with the factory.
factory.Factory.RegisterPathSpec(FVDEPathSpec)
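# Usage sketch added for clarity (not part of the original module): an FVDE
# path specification is always stacked on a parent path specification, e.g.
# one describing the encrypted volume (``volume_path_spec`` is hypothetical):
#
#     fvde_path_spec = FVDEPathSpec(
#         password='my-password', parent=volume_path_spec)
#
# or, equivalently, through the factory registered above:
#
#     fvde_path_spec = factory.Factory.NewPathSpec(
#         definitions.TYPE_INDICATOR_FVDE, password='my-password',
#         parent=volume_path_spec)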
| apache-2.0 | 3,737,688,749,816,931,300 | 31.126984 | 78 | 0.700593 | false | 3.907336 | false | false | false |
emilydolson/avida-spatial-tools | avidaspatial/utils.py | 1 | 11662 | # This file contains functions that are used throuhgout avida-spatial-tools
from math import sqrt, log, floor, ceil
from copy import deepcopy
import pysal
import numpy as np
from .environment_file import *
import seaborn as sns
def get_kwargs(grid, kwargs, phenotypes=False):
"""
Helper function to figure out what denom and palette to use, based on the
    kwargs and the grid being plotted. The optional ``phenotypes`` argument
    (default: False) indicates whether the grid contains phenotypes, as
    opposed to resources.
"""
denom = None
if "denom" in kwargs:
denom = kwargs["denom"]
if "palette" in kwargs:
palette = kwargs["palette"]
if denom is None:
denom = len(palette)
elif "environment" in kwargs or isinstance(grid, EnvironmentFile):
if "environment" in kwargs:
env = kwargs["environment"]
else:
env = grid
if phenotypes:
palette = env.task_palette
if denom is None:
denom = len(env.tasks)
else:
palette = env.resource_palette
if denom is None:
denom = len(env.resources)
else:
length = get_pallete_length(grid)
palette = sns.hls_palette(length, s=1)
denom = length
return denom, palette
def get_pallete_length(grid):
"""
Takes a 2d grid and figures out how many different elements are in it, so
that we know how big to make the palette. Also avoids the unfortunate
red/green palette that results from too few elements.
Returns int indicating the length the palette should have.
"""
elements = list(set(flatten_array(grid)))
length = len(elements)
if type(elements[0]) is str:
lengths = [len(el) for el in elements if not el.startswith("-")]
if max(lengths) < 5: # Mixing red and green
length += 2 # is not pretty so let's avoid it
return length
def agg_grid(grid, agg=None):
"""
Many functions return a 2d list with a complex data type in each cell.
For instance, grids representing environments have a set of resources,
while reading in multiple data files at once will yield a list
containing the values for that cell from each file. In order to visualize
these data types it is helpful to summarize the more complex data types
with a single number. For instance, you might want to take the length
of a resource set to see how many resource types are present. Alternately,
you might want to take the mode of a list to see the most common phenotype
in a cell.
This function facilitates this analysis by calling the given aggregation
function (agg) on each cell of the given grid and returning the result.
    agg - A function indicating how to summarize grid contents.
          Default: mode (string_avg for cells holding lists of strings).
"""
grid = deepcopy(grid)
if agg is None:
if type(grid[0][0]) is list and type(grid[0][0][0]) is str:
agg = string_avg
else:
agg = mode
for i in range(len(grid)):
for j in range(len(grid[i])):
grid[i][j] = agg(grid[i][j])
return grid
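# Illustrative sketch added for clarity (not part of the original module): the
# grid below is hypothetical; cells holding lists of phenotype strings are
# summarized with string_avg by default, or with len when passed explicitly.
def _example_agg_grid():
    grid = [[["0b11", "0b11", "0b01"], ["0b10"]],
            [["0b01", "0b01"], ["0b11", "0b01", "0b01"]]]
    consensus = agg_grid(grid)       # bitwise majority per cell, e.g. "0b11" in cell (0, 0)
    counts = agg_grid(grid, len)     # number of values observed in each cell
    return consensus, counts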
def slice_3d_grid(grid, n):
"""
Takes a three dimensional array and an integer (n) and returns a 2d array
containing the Nth value from the 3rd dimension at each location in the
grid.
"""
phen_grid = initialize_grid((len(grid[0]), len(grid)), 0)
for i in range(len(grid)):
for j in range(len(grid[i])):
phen_grid[i][j] = grid[i][j][n]
return phen_grid
def flatten_array(grid):
"""
Takes a multi-dimensional array and returns a 1 dimensional array with the
same contents.
"""
grid = [grid[i][j] for i in range(len(grid)) for j in range(len(grid[i]))]
while type(grid[0]) is list:
grid = flatten_array(grid)
return grid
def prepend_zeros_to_lists(ls):
"""
Takes a list of lists and appends 0s to the beggining of each sub_list
until they are all the same length. Used for sign-extending binary numbers.
"""
longest = max([len(l) for l in ls])
for i in range(len(ls)):
while len(ls[i]) < longest:
ls[i].insert(0, "0")
def dict_increment(d, key, amount):
if key in d:
d[key] += amount
else:
d[key] = amount
def squared_toroidal_dist(p1, p2, world_size=(60, 60)):
"""
Separated out because sqrt has a lot of overhead
"""
halfx = world_size[0]/2.0
if world_size[0] == world_size[1]:
halfy = halfx
else:
halfy = world_size[1]/2.0
deltax = p1[0] - p2[0]
if deltax < -halfx:
deltax += world_size[0]
elif deltax > halfx:
deltax -= world_size[0]
deltay = p1[1] - p2[1]
if deltay < -halfy:
deltay += world_size[1]
elif deltay > halfy:
deltay -= world_size[1]
return deltax*deltax + deltay*deltay
def toroidal_dist(p1, p2, world_size=(60, 60)):
return sqrt(squared_toroidal_dist(p1, p2, world_size))
# return sqrt(min((p1[0] - p2[0])**2, (p1[0]+world_x - p2[0])**2) + \
# min((p1[1] - p2[1])**2, (p1[1]+world_y - p2[1])**2))
def dist(p1, p2):
"""
Returns the distance between the two given tuples.
"""
return sqrt((p1[0]-p2[0])**2 + (p1[1]-p2[1])**2)
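# Illustrative sketch added for clarity (not part of the original module): on a
# 60x60 toroidal world the corners (0, 0) and (59, 59) wrap around to being
# adjacent, so the toroidal distance is sqrt(2) while the plain Euclidean
# distance is roughly 83.4.
def _example_toroidal_vs_euclidean():
    wrapped = toroidal_dist((0, 0), (59, 59), world_size=(60, 60))   # ~1.414
    straight = dist((0, 0), (59, 59))                                # ~83.4
    return wrapped, straight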
def function_with_args(func, *args):
"""
Returns a function that calls a function with the specified arguments.
The returned function still takes one argument representing the first
positional argument.
This is mostly a helper function for using agg_grid with functions
requiring more information than the cell contents.
"""
def inner(arg):
return func(arg, *args)
return inner
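# Illustrative sketch added for clarity (not part of the original module):
# function_with_args lets agg_grid apply an aggregator that needs extra
# arguments, e.g. turning each cell's resource set into a phenotype string
# (the resource names below are hypothetical).
def _example_function_with_args(world_grid):
    resources = ["equ", "xor", "nor"]
    to_phenotype = function_with_args(res_set_to_phenotype, resources)
    return agg_grid(world_grid, to_phenotype)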
def convert_world_to_phenotype(world):
"""
Converts sets indicating the resources present in a single cell to binary
strings (bit order is based on the order of resources in world.resources).
TODO: Figure out how to handle relationship between resources and tasks
Inputs: world - an EnvironmentFile object with a grid of resource sets
Returns: an EnvironmentFile object with a grid of binary strings
"""
if set(world.resources) != set(world.tasks):
print("Warning: world phenotypes don't correspond to phenotypes")
if set(world.resources).issubset(set(world.tasks)):
conversion_func = function_with_args(res_set_to_phenotype, world.tasks)
else:
conversion_func = \
function_with_args(res_set_to_phenotype, world.resources)
grid = agg_grid(deepcopy(world), conversion_func)
return grid
def phenotype_to_res_set(phenotype, resources):
"""
Converts a binary string to a set containing the resources indicated by
the bits in the string.
Inputs: phenotype - a binary string
resources - a list of string indicating which resources correspond
to which indices of the phenotype
returns: A set of strings indicating resources
"""
assert(phenotype[0:2] == "0b")
phenotype = phenotype[2:]
# Fill in leading zeroes
while len(phenotype) < len(resources):
phenotype = "0" + phenotype
res_set = set()
for i in range(len(phenotype)):
if phenotype[i] == "1":
res_set.add(resources[i])
assert(phenotype.count("1") == len(res_set))
return res_set
def res_set_to_phenotype(res_set, full_list):
"""
Converts a set of strings indicating resources to a binary string where
the positions of 1s indicate which resources are present.
Inputs: res_set - a set of strings indicating which resources are present
full_list - a list of strings indicating all resources which could
could be present, and the order in which they should
map to bits in the phenotype
returns: A binary string
"""
full_list = list(full_list)
phenotype = len(full_list) * ["0"]
for i in range(len(full_list)):
if full_list[i] in res_set:
phenotype[i] = "1"
assert(phenotype.count("1") == len(res_set))
    # Remove unnecessary leading 0s
while phenotype[0] == "0" and len(phenotype) > 1:
phenotype = phenotype[1:]
return "0b"+"".join(phenotype)
def weighted_hamming(b1, b2):
"""
Hamming distance that emphasizes differences earlier in strings.
"""
assert(len(b1) == len(b2))
hamming = 0
for i in range(len(b1)):
if b1[i] != b2[i]:
# differences at more significant (leftward) bits
# are more important
if i > 0:
hamming += 1 + 1.0/i
# This weighting is completely arbitrary
return hamming
def n_tasks(dec_num):
"""
Takes a decimal number as input and returns the number of ones in the
binary representation.
This translates to the number of tasks being done by an organism with a
phenotype represented as a decimal number.
"""
    if isinstance(dec_num, str) and dec_num.startswith("0b"):
        bitstring = dec_num[2:]
    else:
        bitstring = bin(int(dec_num))[2:]  # cut off 0b
# print bin(int(dec_num)), bitstring
return bitstring.count("1")
def convert_to_pysal(data):
"""
Pysal expects a distance matrix, and data formatted in a numpy array.
This functions takes a data grid and returns those things.
"""
w = pysal.lat2W(len(data[0]), len(data))
data = np.array(data)
data = np.reshape(data, (len(data)*len(data[0]), 1))
return w, data
# ~~~~~~~~~~~~~~~~~~~~~~AGGREGATION FUNCTIONS~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Provided for easy use with agg_grid
def mode(ls):
"""
Takes a list as an argument and returns the mode of (most common item in)
that list.
"""
return max(set(ls), key=ls.count)
def mean(ls):
"""
Takes a list and returns the mean.
"""
return float(sum(ls))/len(ls)
def median(ls):
"""
Takes a list and returns the median.
"""
ls = sorted(ls)
return ls[int(floor(len(ls)/2.0))]
def string_avg(strings, binary=True):
"""
Takes a list of strings of equal length and returns a string containing
the most common value from each index in the string.
Optional argument: binary - a boolean indicating whether or not to treat
strings as binary numbers (fill in leading zeros if lengths differ).
"""
if binary: # Assume this is a binary number and fill leading zeros
strings = deepcopy(strings)
longest = len(max(strings, key=len))
for i in range(len(strings)):
while len(strings[i]) < longest:
split_string = strings[i].split("b")
strings[i] = "0b0" + split_string[1]
avg = ""
for i in (range(len(strings[0]))):
opts = []
for s in strings:
opts.append(s[i])
avg += max(set(opts), key=opts.count)
return avg
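# Illustrative sketch added for clarity (not part of the original module):
# string_avg keeps the most common character at every position, i.e. a
# position-wise majority vote over the input strings.
def _example_string_avg():
    return string_avg(["0b110", "0b100", "0b010"])   # "0b110"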
def get_world_dimensions(gridfile, delim=" "):
"""
This function takes the name of a file in grid_task format and returns
the dimensions of the world it represents.
"""
infile = open(gridfile)
lines = infile.readlines()
infile.close()
world_x = len(lines[0].strip().split(delim))
world_y = len(lines)
return (world_x, world_y)
def initialize_grid(world_size, inner):
"""
Creates an empty grid (2d list) with the dimensions specified in
world_size. Each element is initialized to the inner argument.
"""
data = []
for i in range(world_size[1]):
data.append([])
for j in range(world_size[0]):
data[i].append(deepcopy(inner))
return data
| mit | -5,382,053,387,468,629,000 | 28.6743 | 79 | 0.621763 | false | 3.727069 | false | false | false |
squaresLab/Houston | experiments/filter_truth.py | 1 | 2853 | from typing import Iterator, Tuple, Set, List, Dict, Any, Optional, Type
import argparse
import logging
import sys
import os
import concurrent.futures
from ruamel.yaml import YAML
import yaml
from houston.mission import Mission
from compare_traces import load_file as load_traces_file
from compare_traces import is_truth_valid
logger = logging.getLogger('houston') # type: logging.Logger
logger.setLevel(logging.DEBUG)
DESCRIPTION = "Filter out ground truth data."
VALID_LIST_OUTPUT = "valid_list.yml"
def setup_logging(verbose: bool = False) -> None:
log_to_stdout = logging.StreamHandler()
log_to_stdout.setLevel(logging.DEBUG if verbose else logging.INFO)
logging.getLogger('houston').addHandler(log_to_stdout)
logging.getLogger('experiment').addHandler(log_to_stdout)
def parse_args():
p = argparse.ArgumentParser(description=DESCRIPTION)
p.add_argument('oracle', type=str, help='path to oracle trace directory.')
p.add_argument('--threads', type=int, default=1,
help='number of threads')
p.add_argument('--verbose', action='store_true',
help='increases logging verbosity')
return p.parse_args()
def validate_truth(dir_oracle: str, fn_trace: str) -> bool:
mission, oracle_traces = load_traces_file(os.path.join(dir_oracle, fn_trace))
oracle_traces = [t for t in oracle_traces if t.commands]
return is_truth_valid(oracle_traces, 3), fn_trace
def filter_truth_traces(dir_oracle: str,
threads: int) -> List[str]:
trace_filenames = \
[fn for fn in os.listdir(dir_oracle) if fn.endswith('.json')]
valid_traces = []
futures = []
with concurrent.futures.ProcessPoolExecutor(threads) as e:
for fn in trace_filenames:
future = e.submit(validate_truth, dir_oracle, fn)
futures.append(future)
logger.debug("submitted all candidates")
for future in concurrent.futures.as_completed(futures):
valid, trace = future.result()
if valid:
valid_traces.append(trace)
logger.info("trace %s is valid", trace)
else:
logger.info("trace %s is invalid", trace)
logger.debug("finished all")
return valid_traces
def main():
args = parse_args()
setup_logging(verbose=args.verbose)
dir_oracle = args.oracle
if not os.path.exists(dir_oracle):
logger.error("oracle directory not found: %s", dir_oracle)
sys.exit(1)
# obtain a list of oracle traces
trace_filenames = filter_truth_traces(dir_oracle, threads=args.threads)
logger.info("Total number of %d valid truth", len(trace_filenames))
with open(os.path.join(dir_oracle, VALID_LIST_OUTPUT), "w") as f:
YAML().dump(trace_filenames, f)
if __name__ == '__main__':
main()
| mit | -7,573,749,851,091,781,000 | 31.793103 | 81 | 0.661059 | false | 3.753947 | false | false | false |
FluVigilanciaBR/seasonality | methods/mem/sinan_mem_inset_thresholds.py | 1 | 45032 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
import rpy2.robjects as ro
from numpy import *
from pandas import *
from rpy2.robjects import pandas2ri
from rpy2.robjects.packages import importr
pandas2ri.activate()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import argparse
import logging
from argparse import RawDescriptionHelpFormatter
import matplotlib.font_manager as fm
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import matplotlib.ticker as ticker
from scipy.stats.mstats import gmean
module_logger = logging.getLogger('update_system.sinan_mem_inset_thresholds')
# Load R MEM package:
try:
mem = importr('mem')
except:
mem = importr('mem', lib_loc="/home/marfcg/R/x86_64-pc-linux-gnu-library/4.0")
try:
ro.r.require('mem')
except:
ro.r.require('mem', lib_loc="/home/marfcg/R/x86_64-pc-linux-gnu-library/4.0")
# UF codes
tabela_ufnome = {'11': 'Rondônia',
'12': 'Acre',
'13': 'Amazonas',
'14': 'Roraima',
'15': 'Pará',
'16': 'Amapá',
'17': 'Tocantins',
'21': 'Maranhão',
'22': 'Piauí',
'23': 'Ceará',
'24': 'Rio Grande do Norte',
'25': 'Paraíba',
'26': 'Pernambuco',
'27': 'Alagoas',
'28': 'Sergipe',
'29': 'Bahia',
'31': 'Minas Gerais',
'32': 'Espírito Santo',
'33': 'Rio de Janeiro',
'35': 'São Paulo',
'41': 'Paraná',
'42': 'Santa Catarina',
'43': 'Rio Grande do Sul',
'50': 'Mato Grosso do Sul',
'51': 'Mato Grosso',
'52': 'Goiás',
'53': 'Distrito Federal',
'RegN': 'Regional Norte',
'RegC': 'Regional Centro',
'RegL': 'Regional Leste',
'RegS': 'Regional Sul',
'BR': 'Brasil',
'S': 'Região Sul',
'N': 'Região Norte',
'CO': 'Região Centro-oeste',
'NE': 'Região Nordeste',
'SE': 'Região Sudeste'}
tabela_ufcod = {v: k for k, v in tabela_ufnome.items()}
fontproplgd = fm.FontProperties('Oswald')
fontproplgd.set_size(28)
fontproplbl = fm.FontProperties('Oswald')
fontproplbl.set_size(42)
fontproplblinset = fm.FontProperties('Oswald')
fontproplblinset.set_size(30)
fontpropticks = fontproplblinset.copy()
fontpropticks.set_size(24)
fontpropticksinset = fontpropticks.copy()
fontpropticksinset.set_size(20)
def to_bool(v):
if v in [True, 'T', 't', 'True', 'true', 1, '1']:
v = True
else:
v = False
return v
def discardseasons(df, seasons, gdthres=2.0, smin=5):
"""
    Calculate peak variability in order to keep only seasons whose peaks deviate
    relatively little from the geometric mean of the peaks (at most by a factor gdthres).
    Always maintain at least smin seasons.
:param df: data frame with seasons by columns
:param seasons: list of column names corresponding to each season
:param gdthres: maximum geometric deviation from median
:param smin: minimum number of seasons maintained
:return drop_seasons: list with seasons to be dropped
"""
drop_seasons = []
seasons = seasons.copy()
# Drop null seasons
series = df[seasons].max()
drop_seasons = list(series[series == 0].index)
series.drop(drop_seasons, axis=0, inplace=True)
# If resulting data contains less than smin seasons, return
nseasons = len(series)
nmax = nseasons - smin
if nmax <= 0:
return drop_seasons
####### Test removing one by one ######
# Take log of geometric deviation threshold for simplicity
gdthres = np.log(gdthres)
for n in range(nmax):
# Current maxima
tmp_series = df[list(set(seasons).difference(drop_seasons))].max()
# Grab current geometric mean
series_gmean = np.log(gmean(tmp_series))
# Calculate maximum geometric deviation from geometric mean
mgd = abs(np.log(tmp_series) - series_gmean).max()
if mgd > gdthres:
idx = abs(np.log(tmp_series) - series_gmean).idxmax()
drop_seasons.append(idx)
return drop_seasons
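# Illustrative sketch added for clarity (not part of the original script): with
# a geometric deviation threshold of 2, the season whose peak (100) lies far
# from the geometric mean of the peaks (10, 12 and 100) is dropped, while at
# least smin seasons are always kept. The season labels below are hypothetical.
def _example_discardseasons():
    toy = pd.DataFrame({'2010': [2, 10, 3], '2011': [1, 12, 4], '2012': [5, 100, 7]})
    return discardseasons(toy, ['2010', '2011', '2012'], gdthres=2.0, smin=2)   # ['2012']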
def applymem(df, discarded_seasons=None, wdw_method=2, lower_bound=5.0):
#rdf = pandas2ri.py2ri(df)
rdf = ro.conversion.py2rpy(df)
seasons = sorted(list(df.columns))
# Discard 2009 season if present:
seasons = sorted(set(seasons).difference(discarded_seasons))
rseasons = ro.StrVector(seasons)
ro.globalenv['df'] = rdf
ro.globalenv['seasons'] = rseasons
# # Method for obtaining typical time series evolution (default 2)
# ro.globalenv['par.type.curve'] = 2
# # Method for obtaining pre/post-epidemic threshold (default 4)
# ro.globalenv['par.type.threshold'] = 2
# # Method for obtaining intensity thresholds (default 4)
# ro.globalenv['par.type.intensity'] = 2
# # Method for obtaining outbreak start and length (default 6)
# ro.globalenv['par.type.other'] = 2
# # Total number of points to obtain pre/post-threshold (will take n/seasons from each)
# ro.globalenv['par.n.max'] = 30
# # Confidence interval for modelled curve
# ro.globalenv['par.level.curve'] = 0.90
# # Confidence interval for pre/post-thresold
# ro.globalenv['par.level.threshold'] = 0.95
# # Quantiles for intensity thresholds
# ro.globalenv['par.level.intensity'] = ro.FloatVector([0.40, 0.90, 0.975])
#
# epimemrslt = ro.r('memmodel(i.data=subset(df, select=seasons), i.type.curve=par.type.curve,' +
# 'i.type.threshold=par.type.threshold, i.type.intensity=par.type.intensity,' +
# 'i.type.other=par.type.other, i.n.max=par.n.max, i.level.curve=par.level.curve,' +
# 'i.level.threshold=par.level.threshold, i.level.intensity=par.level.intensity)')
ro.globalenv['df'] = rdf
ro.globalenv['seasons'] = rseasons
ro.globalenv['par.method'] = wdw_method
ro.globalenv['par.type.curve'] = 2
ro.globalenv['par.n.max'] = 20
ro.globalenv['par.level.curve'] = 0.95
ro.globalenv['par.level.threshold'] = 0.95
ro.globalenv['par.type.intensity'] = 6
ro.globalenv['par.level.intensity'] = ro.FloatVector([0.40, 0.90, 0.975])
epimemrslt = ro.r('memmodel(i.data=subset(df, select=seasons), i.type.curve=par.type.curve, i.method=par.method,' +
'i.n.max=par.n.max, i.level.curve=par.level.curve, i.level.threshold=par.level.threshold,' +
'i.type.intensity=par.type.intensity, i.level.intensity=par.level.intensity)')
# Pre-epidemic threshold:
epithreshold = max(lower_bound, epimemrslt.rx2('pre.post.intervals')[0, 2])
typrealcurve = pd.DataFrame(epimemrslt.rx2('typ.real.curve'))
# Check for seasons below threshold:
dropseasons = set()
for s in seasons:
if df[s].max() < epithreshold:
dropseasons.add(s)
# Drop seasons below threshold and rerun algorithm:
episeasons = list(seasons)
if len(dropseasons) > 0 and len(dropseasons) < len(seasons):
episeasons = sorted(list(set(seasons).difference(dropseasons)))
ro.globalenv['episeasons'] = ro.StrVector(episeasons)
# epimemrslt = ro.r('memmodel(i.data=subset(df, select=episeasons), i.type.curve=par.type.curve,' +
# 'i.type.threshold=par.type.threshold, i.type.intensity=par.type.intensity,' +
# 'i.type.other=par.type.other, i.n.max=par.n.max, i.level.curve=par.level.curve,' +
# 'i.level.threshold=par.level.threshold, i.level.intensity=par.level.intensity)')
epimemrslt = ro.r('memmodel(i.data=df[episeasons], i.type.curve=par.type.curve,' +
'i.method=par.method,' +
'i.n.max=par.n.max, i.level.curve=par.level.curve, i.level.threshold=par.level.threshold,' +
'i.type.intensity=par.type.intensity, i.level.intensity=par.level.intensity)')
# Store results in python dictionary of objects
pyepimemrslt = {}
tgt_names = [
'pre.post.intervals',
'mean.start',
'ci.start',
'mean.length',
'ci.length',
'epi.intervals',
'typ.real.curve',
'typ.curve',
'moving.epidemics',
'n.seasons'
]
for name in tgt_names:
rdata = epimemrslt.rx2(name)
if name == 'call':
pyepimemrslt.update({name: str(rdata)})
elif ndim(rdata) == 1:
pyepimemrslt.update({name: rdata[0]})
else:
pyepimemrslt.update({name: pd.DataFrame(rdata)})
# typ.curve is the typical curve obtained from averaging over epidemic seasons with time rescaled
# so that the start of the epidemic period coincides with mean.start
pyepimemrslt['typ.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.curve']['baixo'] = pyepimemrslt['typ.curve']['baixo'].where(
pyepimemrslt['typ.curve']['baixo'] >= 0,
other=0)
pyepimemrslt['typ.curve']['baixo'] = pyepimemrslt['typ.curve']['baixo']. \
where((-pyepimemrslt['typ.curve']['baixo'].isnull()), other=pyepimemrslt['typ.curve']['mediano'])
pyepimemrslt['typ.curve']['alto'] = pyepimemrslt['typ.curve']['alto']. \
where((-pyepimemrslt['typ.curve']['alto'].isnull()), other=pyepimemrslt['typ.curve']['mediano'])
pyepimemrslt['pre.post.intervals'].rename(index={0: 'pre', 1: 'post'}, inplace=True)
# typ.real.curve is the typical curve without time shift, that is, respecting the original weeks from data
# this curve is better to keep all seasons, not only the epidemic ones.
pyepimemrslt['typ.real.curve'] = typrealcurve.copy()
pyepimemrslt['typ.real.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.real.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.real.curve'].loc[pyepimemrslt['typ.real.curve']['baixo'] < 0, 'baixo'] = 0
pyepimemrslt['typ.real.curve']['baixo'] = pyepimemrslt['typ.real.curve']['baixo']. \
where((-pyepimemrslt['typ.real.curve']['baixo'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
pyepimemrslt['typ.real.curve']['alto'] = pyepimemrslt['typ.real.curve']['alto']. \
where((-pyepimemrslt['typ.real.curve']['alto'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
newcols = {}
for k, v in enumerate(episeasons):
newcols[k] = str(v) + ' transladado'
pyepimemrslt['moving.epidemics'].rename(columns=newcols, inplace=True)
return pyepimemrslt, dropseasons
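# Usage sketch added for clarity (not part of the original script): applymem
# expects a DataFrame with one column per season (rows are epidemiological
# weeks, values are weekly incidence) and returns the MEM estimates plus the
# seasons whose peak stayed below the pre-epidemic threshold. The DataFrame
# name below is hypothetical:
#
#     thresholds, dropped = applymem(df_seasons, discarded_seasons=['SRAG2009'],
#                                    wdw_method=2, lower_bound=5.0)
#     pre_epidemic_threshold = thresholds['pre.post.intervals'].loc['pre', 2]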
def extract_typ_real_curve(df, discarded_seasons=None, wdw_method=2, lower_bound=5.0):
seasons = sorted(list(df.columns))
seasons = sorted(set(seasons).difference(discarded_seasons))
#rdf = pandas2ri.py2ri(df)
rdf = ro.conversion.py2rpy(df)
rseasons = ro.StrVector(seasons)
ro.globalenv['df'] = rdf
ro.globalenv['seasons'] = rseasons
ro.globalenv['par.method'] = wdw_method
ro.globalenv['par.type.curve'] = 2
ro.globalenv['par.level.curve'] = 0.95
epimemrslt = ro.r('t(apply(subset(df, select=seasons), 1, memci, i.type.curve=par.type.curve, ' +
'i.level.curve=par.level.curve))')
    # Typical real curve with pointwise confidence bands:
typrealcurve = pd.DataFrame(epimemrslt)
# Store results in python dictionary of objects
pyepimemrslt = {}
# typ.real.curve is the typical curve without time shift, that is, respecting the original weeks from data
# this curve is better to keep all seasons, not only the epidemic ones.
pyepimemrslt['typ.real.curve'] = typrealcurve.copy()
pyepimemrslt['typ.real.curve'].rename(columns={0: 'baixo', 1: 'mediano', 2: 'alto'}, inplace=True)
pyepimemrslt['typ.real.curve']['mediano'].fillna(0, inplace=True)
pyepimemrslt['typ.real.curve'].loc[pyepimemrslt['typ.real.curve']['baixo'] < 0, 'baixo'] = 0
pyepimemrslt['typ.real.curve']['baixo'] = pyepimemrslt['typ.real.curve']['baixo']. \
where((-pyepimemrslt['typ.real.curve']['baixo'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
pyepimemrslt['typ.real.curve']['alto'] = pyepimemrslt['typ.real.curve']['alto']. \
where((-pyepimemrslt['typ.real.curve']['alto'].isnull()), other=pyepimemrslt['typ.real.curve']['mediano'])
return pyepimemrslt
def plotmemcurve(uf, dftmp, dftmpinset, thresholds, seasons, lastseason, epicols):
sns.set_style('darkgrid')
sns.set_context("talk")
sns.set_palette('Set2', len(seasons) + 4)
colorcode = sns.color_palette('Set2', len(seasons) + 4)
fig, ax = plt.subplots(nrows=2, ncols=1, figsize=[20, 20])
plt.subplots_adjust(hspace=0.3)
# Set ymax at least = 1:
maxval1 = dftmp[list(set(seasons).union(['corredor alto', 'intensidade muito alta']).
difference(['SRAG2009']))].max().max()
maxval2 = dftmp[list(set(seasons).union(['curva epi. alta', 'intensidade muito alta']).
difference(['SRAG2009']))].max().max()
if maxval1 < 1:
ax[0].set_ylim([0, 1])
ax[1].set_ylim([0, 1])
else:
ax[0].set_ylim([0, maxval1])
ax[1].set_ylim([0, maxval2])
# if uf == 33:
# ax[0].set_ylim([0,0.25])
# elif uf == 32:
# ax[0].set_ylim([0,0.3])
ax[0].fill_between(dftmp['epiweek'], 0, dftmp['corredor baixo'], color='green', alpha=0.5)
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor baixo'], dftmp['corredor mediano'], color='yellow',
alpha=0.5)
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor mediano'], dftmp['corredor alto'], color='orange',
alpha=0.5)
dftmp.plot(ax=ax[0], x='epiweek', y=seasons)
dftmp.plot(ax=ax[0], x='epiweek', y=lastseason, color='k', lw=3)
dftmp.plot(ax=ax[0], x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
# dftmp.plot(ax=ax[0], x='epiweek', y='intensidade baixa', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade alta', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade muito alta', style='--', color=colorcode[-1])
# Check for maximum value on y-axis and fill from 'corredor alto' to maxy
dftmp.plot(ax=ax[0], x='epiweek', y='corredor alto', legend=False, alpha=0)
miny, maxy = ax[0].get_ylim()
del (ax[0].lines[-1])
ax[0].fill_between(dftmp['epiweek'], dftmp['corredor alto'], maxy, color='red', alpha=0.5)
ax[0].set_ylim([miny, maxy])
for label in ax[0].get_xticklabels():
label.set_fontproperties(fontpropticks)
for label in ax[0].get_yticklabels():
label.set_fontproperties(fontpropticks)
#### Start absolute value plot as inset ####
sns.set_style('whitegrid')
axinset = inset_axes(ax[0], width='35%', height='35%', loc=1)
maxval = dftmpinset[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval < 1:
axinset.set_ylim([0, 1])
else:
axinset.set_ylim([0, maxval])
dftmpinset.plot(ax=axinset, x='epiweek', y=seasons)
dftmpinset.plot(ax=axinset, x='epiweek', y=lastseason, color='k', lw=3)
dftmpinset.plot(ax=axinset, x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
axinset.legend_.remove()
axinset.set_xlabel('SE', fontproperties=fontproplblinset)
axinset.set_ylabel('Casos', fontproperties=fontproplblinset)
axinset.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for label in axinset.get_xticklabels():
label.set_fontproperties(fontpropticksinset)
for label in axinset.get_yticklabels():
label.set_fontproperties(fontpropticksinset)
#### Start plot relative to outbreak typical curve ####
ax[1].fill_between(dftmp['SE relativa ao início do surto'], 0, dftmp['curva epi. baixa'], color='green',
alpha=0.5)
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. baixa'],
dftmp['curva epi. mediana'], color='yellow', alpha=0.5)
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. mediana'],
dftmp['curva epi. alta'], color='orange', alpha=0.5)
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='curva epi. mediana', color='silver',
label='tendência mediana')
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='limiar pré-epidêmico', style='--',
color='red', alpha=0.8)
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='limiar pós-epidêmico', style='--',
color='green', alpha=0.5)
epicolor = []
for s in epicols:
s = s.strip(' transladado')
n = list(seasons).index(s)
epicolor.append(colorcode[n])
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y=epicols, color=epicolor)
# Check for maximum value on y-axis and fill from 'corredor alto' to maxy
dftmp.plot(ax=ax[1], x='SE relativa ao início do surto', y='curva epi. alta', legend=False, alpha=0)
miny, maxy = ax[1].get_ylim()
del (ax[1].lines[-1])
ax[1].fill_between(dftmp['SE relativa ao início do surto'], dftmp['curva epi. alta'], maxy, color='red',
alpha=0.5)
ax[1].set_ylim([miny, maxy])
ax[1].plot([0, 0], [miny, maxy], '--', color='silver')
duracao = int(thresholds['mean.length'][0])
ax[1].plot([duracao, duracao], [miny, maxy], '--', color='silver')
ax[1].set_title('Tendência ao longo do surto', fontproperties=fontproplbl)
epistart = int(thresholds['mean.start'][0])
ax[1].set_xlabel('SE em relação à semana típica de início do surto (SE=%s)' % epistart,
fontproperties=fontproplbl)
minx, maxx = ax[1].get_xlim()
xticks = sort(np.append(np.arange(0, int(minx), -4), np.arange(4, int(maxx), 4)))
ax[1].set_xticks(xticks)
ax[1].set_xticklabels(xticks, fontproperties=fontpropticks)
for label in ax[0].get_yticklabels():
label.set_fontproperties(fontpropticks)
ax[1].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
box = ax[1].get_position()
ax[1].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[1].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
ax[0].set_title(tabela_ufnome[uf], fontproperties=fontproplbl)
ax[0].set_xlabel('SE', fontproperties=fontproplbl)
ax[0].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
xticks = np.arange(4, 53, 4)
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticks)
# Shrink current axis by 10%
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[0].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
return fig
def plotmemfailedcurve(uf, dftmp, dftmpinset, seasons, lastseason):
sns.set_style('darkgrid')
sns.set_context("talk")
sns.set_palette('Set2', len(seasons) + 4)
colorcode = sns.color_palette('Set2', len(seasons) + 4)
fig, axi = plt.subplots(nrows=1, ncols=1, figsize=[20, 10])
ax = [axi]
maxval1 = dftmp[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval1 < 1:
ax[0].set_ylim([0, 1])
else:
ax[0].set_ylim([0, maxval1])
# if uf == 33:
# ax[0].set_ylim([0,0.25])
# elif uf == 32:
# ax[0].set_ylim([0,0.3])
dftmp.plot(ax=ax[0], x='epiweek', y=seasons)
dftmp.plot(ax=ax[0], x='epiweek', y=lastseason, color='k', lw=3)
dftmp.plot(ax=ax[0], x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
# dftmp.plot(ax=ax[0], x='epiweek', y='intensidade baixa', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade alta', style='--')
dftmp.plot(ax=ax[0], x='epiweek', y='intensidade muito alta', style='--', color=colorcode[-1])
for label in ax[0].get_xticklabels():
label.set_fontproperties(fontpropticks)
for label in ax[0].get_yticklabels():
label.set_fontproperties(fontpropticks)
#### Start absolute value plot as inset ####
sns.set_style('whitegrid')
axinset = inset_axes(ax[0], width='35%', height='35%', loc=1)
maxval = dftmpinset[list(set(seasons).union([lastseason]).difference(['SRAG2009']))].max().max()
if maxval < 1:
axinset.set_ylim([0, 1])
else:
axinset.set_ylim([0, maxval])
dftmpinset.plot(ax=axinset, x='epiweek', y=seasons)
dftmpinset.plot(ax=axinset, x='epiweek', y=lastseason, color='k', lw=3)
dftmpinset.plot(ax=axinset, x='epiweek', y='limiar pré-epidêmico', style='--', color='red', alpha=0.8)
axinset.legend_.remove()
axinset.set_xlabel('SE', fontproperties=fontproplblinset)
axinset.set_ylabel('Casos', fontproperties=fontproplblinset)
axinset.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
for label in axinset.get_xticklabels():
label.set_fontproperties(fontpropticksinset)
for label in axinset.get_yticklabels():
label.set_fontproperties(fontpropticksinset)
ax[0].set_title(tabela_ufnome[uf], fontproperties=fontproplbl)
ax[0].set_xlabel('SE', fontproperties=fontproplbl)
ax[0].set_ylabel('Incidência (por 100mil habitantes)', fontproperties=fontproplbl)
xticks = np.arange(4, 53, 4)
ax[0].set_xticks(xticks)
ax[0].set_xticklabels(xticks)
# Shrink current axis by 10%
box = ax[0].get_position()
ax[0].set_position([box.x0, box.y0, box.width * 0.8, box.height])
ax[0].legend(prop=fontproplgd, loc='center left', bbox_to_anchor=(1, 0.5))
return fig
def recalc_incidence(x, popnorm):
# Recalculate incidence based on integer values.
# Useful for values calculated assuming continuous functions.
return round(x/popnorm)*popnorm
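# Worked example (illustrative values only, not taken from the data): for a
# population of 200,000 people, popnorm = 100000/200000 = 0.5. An incidence of
# 1.2 per 100k corresponds to 2.4 cases, so recalc_incidence(1.2, 0.5) returns
# round(2.4)*0.5 = 1.0, i.e. the incidence corresponding to exactly 2 whole cases.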
def main(fname, plot_curves=False, sep=',', uflist='all', out_pref=''):
pref = ('.'.join(fname.replace('-incidence', '').split('.')[:-1])).split('/')[-1]
fname = fname.replace('covid', 'flu')
df = pd.read_csv(fname, sep=sep, encoding='utf-8')
dfinset = pd.read_csv(fname.replace('-incidence', ''), sep=sep, encoding='utf-8')
if 'Região' in list(df.columns):
df.rename(columns={'Região': 'UF'}, inplace=True)
dfinset.rename(columns={'Região': 'UF'}, inplace=True)
df.UF = df.UF.astype(str)
dfinset.UF = dfinset.UF.astype(str)
plt.interactive(False)
if uflist == 'all':
uflist = list(df.UF.unique())
dfpop = pd.read_csv('../data/populacao_uf_regional_atual.csv', encoding='utf-8')
dfreport = pd.DataFrame()
dfcorredor = pd.DataFrame()
dfreport_cases = pd.DataFrame()
dfcorredor_cases = pd.DataFrame()
cols_report = ['UF', 'População', 'Média geométrica do pico de infecção das temporadas regulares',
'região de baixa atividade típica',
'limiar pré-epidêmico', 'intensidade alta', 'intensidade muito alta',
'SE típica do início do surto',
'SE típica do início do surto - IC inferior (2,5%)',
'SE típica do início do surto - IC superior (97,5%)',
'duração típica do surto',
'duração típica do surto - IC inferior (2,5%)',
'duração típica do surto - IC superior (97,5%)',
'temporadas utilizadas para os corredores endêmicos',
'ano']
cols_corredor = ['UF', 'População', 'epiweek', 'corredor baixo', 'corredor mediano', 'corredor alto', 'ano']
# Define outbreak window method.
# Check epitiming function from MEM package for detail
# 1: original method published in Vega et al.
# 2: second derivative fixed criterium
wdw_method = 2
wdw_method_lbl = {1: 'original', 2: 'criterium'}
mem_calc = {'SUCCESS': [], 'FAILED': []}
for uf in uflist:
if uf not in list(df.UF.unique()):
continue
dftmp = df[df.UF == uf].reset_index().drop('index', axis=1).copy()
dftmpinset = dfinset[dfinset.UF == uf].reset_index().drop('index', axis=1).copy()
seasons = sorted([x for x in dftmp.columns if 'SRAG' in x])
lastseason = seasons[-1]
dftmp['ano'] = lastseason.strip('SRAG')
dftmpinset['ano'] = lastseason.strip('SRAG')
seasons = list(np.delete(seasons, -1))
# Select "regular seasons" by comparing geometric distance of corresponding peaks
# discard season 2009 by default
tmpseasons = seasons.copy()
if 'SRAG2009' in tmpseasons:
tmpseasons.remove('SRAG2009')
if 'SRAG2020' in tmpseasons:
tmpseasons.remove('SRAG2020')
discarded_seasons = discardseasons(df=dftmp, seasons=tmpseasons, gdthres=2.8, smin=4)
discarded_seasons.extend(['SRAG2009'])
if lastseason != 'SRAG2020':
discarded_seasons.extend(['SRAG2020'])
discarded_seasons.extend([lastseason])
# Calculate incidence normalization factor, per 100.000
incidence_norm = np.float(100000 / dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
lowseasons = set()
dftmp['região de baixa atividade típica'] = 0
try:
if dftmpinset[list(set(seasons).difference(discarded_seasons))].max().max() < 3:
dftmp['região de baixa atividade típica'] = 1
thresholds, lowseasons = applymem(dftmp[seasons],
discarded_seasons,
wdw_method,
lower_bound=1*incidence_norm)
if thresholds['pre.post.intervals'].loc['pre', 2] >= 1*incidence_norm:
dftmp['mediana pré-epidêmica'] = recalc_incidence(thresholds['pre.post.intervals'].loc['pre', 1], incidence_norm)
dftmp['limiar pré-epidêmico'] = recalc_incidence(thresholds['pre.post.intervals'].loc['pre', 2],
incidence_norm)
dftmp['SE relativa ao início do surto'] = dftmp['epiweek'] - thresholds['mean.start']
dftmp['SE típica do início do surto'] = thresholds['mean.start']
# Confidence interval for epi.start
cimin = thresholds['ci.start'].loc[0, 0]
cimax = thresholds['ci.start'].loc[0, 2]
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = cimin
dftmp['SE típica do início do surto - IC superior (97,5%)'] = cimax
dftmp['duração típica do surto'] = thresholds['mean.length']
# Confidence interval for epi.length
cimin = thresholds['ci.length'].loc[1, 0]
cimax = thresholds['ci.length'].loc[1, 2]
dftmp['duração típica do surto - IC inferior (2,5%)'] = cimin
dftmp['duração típica do surto - IC superior (97,5%)'] = cimax
else:
dftmp['região de baixa atividade típica'] = 1
dftmp['mediana pré-epidêmica'] = np.nan
dftmp['limiar pré-epidêmico'] = 1 * incidence_norm
dftmp['SE relativa ao início do surto'] = np.nan
dftmp['SE típica do início do surto'] = np.nan
# Confidence interval for epi.start
cimin = np.nan
cimax = np.nan
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = cimin
dftmp['SE típica do início do surto - IC superior (97,5%)'] = cimax
dftmp['duração típica do surto'] = np.nan
# Confidence interval for epi.length
cimin = np.nan
cimax = np.nan
dftmp['duração típica do surto - IC inferior (2,5%)'] = cimin
dftmp['duração típica do surto - IC superior (97,5%)'] = cimax
dftmp['limiar pós-epidêmico'] = recalc_incidence(thresholds['pre.post.intervals'].loc['post', 2],
incidence_norm)
dftmp['intensidade baixa'] = recalc_incidence(thresholds['epi.intervals'].loc[0, 3], incidence_norm)
dftmp['intensidade alta'] = recalc_incidence(max([2*incidence_norm, thresholds['epi.intervals'].loc[1,
3]]),
incidence_norm)
dftmp['intensidade muito alta'] = recalc_incidence(max([3*incidence_norm, thresholds[
'epi.intervals'].loc[2, 3]]), incidence_norm)
dftmp['corredor baixo'] = recalc_incidence(thresholds['typ.real.curve']['baixo'], incidence_norm)
dftmp['corredor mediano'] = recalc_incidence(thresholds['typ.real.curve']['mediano'], incidence_norm)
dftmp['corredor alto'] = recalc_incidence(thresholds['typ.real.curve']['alto'], incidence_norm)
dftmp['População'] = int(dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
dftmp['curva epi. baixa'] = recalc_incidence(thresholds['typ.curve']['baixo'], incidence_norm)
dftmp['curva epi. mediana'] = recalc_incidence(thresholds['typ.curve']['mediano'], incidence_norm)
dftmp['curva epi. alta'] = recalc_incidence(thresholds['typ.curve']['alto'], incidence_norm)
epicols = list(thresholds['moving.epidemics'].columns)
dftmp[epicols] = thresholds['moving.epidemics']
dftmp['n.seasons'] = thresholds['n.seasons']
dftmp['temporadas utilizadas para os corredores endêmicos'] = ', '.join(str(x).strip('SRAG') for x in
sorted(set(
seasons).difference(discarded_seasons)))
# Geometric mean of regular seasons' peak:
dftmp_peaks = dftmp[seasons].max()
peak_gmean = gmean(dftmp_peaks[list(set(seasons).difference(discarded_seasons))])
dftmp_peaks_inset = dftmpinset[seasons].max()
peak_gmean_inset = gmean(dftmp_peaks_inset[list(set(seasons).difference(discarded_seasons))])
dftmp['Média geométrica do pico de infecção das temporadas regulares'] = peak_gmean
dftmpinset['Média geométrica do pico de infecção das temporadas regulares'] = peak_gmean_inset
for lbl in seasons:
peak = dftmp_peaks[lbl]
peak_inset = dftmp_peaks_inset[lbl]
if peak == 0:
textval = '-'
textval_inset = '-'
else:
geom_dist = np.log(peak) - np.log(peak_gmean)
geom_dist_inset = np.log(peak_inset) - np.log(peak_gmean_inset)
textval = '%.2f' % np.e ** abs(geom_dist) + ' vez(es) ' + ("maior" if geom_dist > 0 else "menor")
                    textval_inset = '%.2f' % np.e ** abs(geom_dist_inset) + ' vez(es) ' + \
                                    ("maior" if geom_dist_inset > 0 else "menor")
dftmp['Distância geométrica do pico na temporada %s' % lbl] = textval
dftmpinset['Distância geométrica do pico na temporada %s' % lbl] = textval_inset
dftmp.to_csv('./mem-data/%s-mem-%s-incidencia-dropgdist%s-droplow%s-%s_method.csv' % (
pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
dftmpinset['região de baixa atividade típica'] = dftmp['região de baixa atividade típica']
dftmpinset['limiar pré-epidêmico'] = max([1, round(
thresholds['pre.post.intervals'].loc['pre', 2]/incidence_norm)])
dftmpinset['limiar pós-epidêmico'] = max([1, round(
thresholds['pre.post.intervals'].loc['post', 2]/incidence_norm)])
dftmpinset['intensidade baixa'] = round(thresholds['epi.intervals'].loc[0, 3]/incidence_norm)
dftmpinset['intensidade alta'] = max([2, round(
thresholds['epi.intervals'].loc[1, 3]/incidence_norm)])
dftmpinset['intensidade muito alta'] = max([3, round(
thresholds['epi.intervals'].loc[2, 3]/incidence_norm)])
dftmpinset['corredor baixo'] = round(dftmp['corredor baixo']/incidence_norm)
dftmpinset['corredor mediano'] = round(dftmp['corredor mediano']/incidence_norm)
dftmpinset['corredor alto'] = round(dftmp['corredor alto']/incidence_norm)
dftmpinset['SE relativa ao início do surto'] = dftmp['SE relativa ao início do surto']
dftmpinset['SE típica do início do surto'] = dftmp['SE típica do início do surto']
dftmpinset['SE típica do início do surto - IC inferior (2,5%)'] = \
dftmp['SE típica do início do surto - IC inferior (2,5%)']
dftmpinset['SE típica do início do surto - IC superior (97,5%)'] = \
dftmp['SE típica do início do surto - IC superior (97,5%)']
dftmpinset['duração típica do surto'] = dftmp['duração típica do surto']
dftmpinset['duração típica do surto - IC inferior (2,5%)'] = \
dftmp['duração típica do surto - IC inferior (2,5%)']
dftmpinset['duração típica do surto - IC superior (97,5%)'] = \
dftmp['duração típica do surto - IC superior (97,5%)']
dftmpinset['curva epi. baixa'] = round(dftmp['curva epi. baixa']/incidence_norm)
dftmpinset['curva epi. mediana'] = round(dftmp['curva epi. mediana']/incidence_norm)
dftmpinset['curva epi. alta'] = round(dftmp['curva epi. alta']/incidence_norm)
epicols = list(thresholds['moving.epidemics'].columns)
dftmpinset[epicols] = thresholds['moving.epidemics']
dftmpinset['n.seasons'] = thresholds['n.seasons']
dftmpinset['População'] = dftmp['População']
dftmpinset['temporadas utilizadas para os corredores endêmicos'] = \
dftmp['temporadas utilizadas para os corredores endêmicos']
dftmpinset.to_csv(
'./mem-data/%s-mem-%s-dropgdist%s-droplow%s-%s_method.csv' % (pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace(
'SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
if plot_curves:
fig = plotmemcurve(uf=uf, dftmp=dftmp, dftmpinset=dftmpinset, thresholds=thresholds, seasons=seasons,
lastseason=lastseason, epicols=epicols)
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.svg' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.png' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
plt.clf()
plt.close()
mem_calc['SUCCESS'].extend([uf])
except:
mem_calc['FAILED'].extend([uf])
dftmp['região de baixa atividade típica'] = 1
dftmpinset['região de baixa atividade típica'] = 1
thresholds = extract_typ_real_curve(dftmp[seasons], discarded_seasons, wdw_method,
lower_bound=1*incidence_norm)
dftmp['Média geométrica do pico de infecção das temporadas regulares'] = np.nan
dftmp['mediana pré-epidêmica'] = np.nan
dftmp['limiar pré-epidêmico'] = 1 * incidence_norm
dftmp['limiar pós-epidêmico'] = 1 * incidence_norm
dftmp['intensidade baixa'] = 0
dftmp['intensidade alta'] = 2 * incidence_norm
dftmp['intensidade muito alta'] = 3 * incidence_norm
dftmp['corredor baixo'] = recalc_incidence(thresholds['typ.real.curve']['baixo'], incidence_norm)
dftmp['corredor mediano'] = recalc_incidence(thresholds['typ.real.curve']['mediano'], incidence_norm)
dftmp['corredor alto'] = recalc_incidence(thresholds['typ.real.curve']['alto'], incidence_norm)
dftmp['SE típica do início do surto'] = np.nan
dftmp['duração típica do surto'] = np.nan
dftmp['Média geométrica do pico de infecção das temporadas regulares'] = np.nan
dftmp['SE típica do início do surto - IC inferior (2,5%)'] = np.nan
dftmp['SE típica do início do surto - IC superior (97,5%)'] = np.nan
dftmp['duração típica do surto - IC inferior (2,5%)'] = np.nan
dftmp['duração típica do surto - IC superior (97,5%)'] = np.nan
dftmp['População'] = int(dfpop.loc[dfpop['Código'] == str(uf), 'Total'])
dftmp['temporadas utilizadas para os corredores endêmicos'] = np.nan
dftmp.to_csv('./mem-data/%s-memfailed-%s-incidencia-dropgdist%s-%s_method.csv' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''), wdw_method_lbl[wdw_method]), index=False,
encoding='utf-8')
dftmpinset['Média geométrica do pico de infecção das temporadas regulares'] = np.nan
dftmpinset['limiar pré-epidêmico'] = 1
dftmpinset['limiar pós-epidêmico'] = 1
dftmpinset['intensidade baixa'] = 0
dftmpinset['intensidade alta'] = 2
dftmpinset['intensidade muito alta'] = 3
dftmpinset['corredor baixo'] = round(dftmp['corredor baixo']/incidence_norm)
dftmpinset['corredor mediano'] = round(dftmp['corredor mediano']/incidence_norm)
dftmpinset['corredor alto'] = round(dftmp['corredor alto']/incidence_norm)
dftmpinset['SE relativa ao início do surto'] = np.nan
dftmpinset['SE típica do início do surto'] = np.nan
dftmpinset['SE típica do início do surto - IC inferior (2,5%)'] = np.nan
dftmpinset['SE típica do início do surto - IC superior (97,5%)'] = np.nan
dftmpinset['duração típica do surto'] = dftmp['duração típica do surto']
dftmpinset['duração típica do surto - IC inferior (2,5%)'] = np.nan
dftmpinset['duração típica do surto - IC superior (97,5%)'] = np.nan
dftmpinset['curva epi. baixa'] = np.nan
dftmpinset['curva epi. mediana'] = np.nan
dftmpinset['curva epi. alta'] = np.nan
dftmpinset['n.seasons'] = 0
dftmpinset['População'] = dftmp['População']
dftmpinset['temporadas utilizadas para os corredores endêmicos'] = np.nan
dftmpinset.to_csv(
'./mem-data/%s-memfailed-%s-dropgdist%s-%s_method.csv' % (pref, tabela_ufnome[uf].
replace(' ', '_'),
'-'.join(discarded_seasons).replace(
'SRAG', ''),
wdw_method_lbl[wdw_method]),
index=False, encoding='utf-8')
if plot_curves:
fig = plotmemfailedcurve(uf=uf, dftmp=dftmp, dftmpinset=dftmpinset, seasons=seasons,
lastseason=lastseason)
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.svg' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
fig.savefig(
'./mem-data/%s-%s-inset-dropgdist%s-droplow%s-%s_method.png' %
(pref, tabela_ufnome[uf].replace(' ', '_'),
'-'.join(discarded_seasons).replace('SRAG', ''),
'-'.join(lowseasons).replace('SRAG', ''),
wdw_method_lbl[wdw_method]),
bbox_inches='tight')
plt.clf()
plt.close()
dfreport = dfreport.append(dftmp[cols_report].head(1), ignore_index=True, sort=True)
dfcorredor = dfcorredor.append(dftmp[cols_corredor], ignore_index=True, sort=True)
dfreport_cases = dfreport_cases.append(dftmpinset[cols_report].head(1), ignore_index=True, sort=True)
dfcorredor_cases = dfcorredor_cases.append(dftmpinset[cols_corredor], ignore_index=True, sort=True)
for dfloop in [dfreport, dfcorredor]:
dfloop['Unidade da Federação'] = dfloop.UF.map(tabela_ufnome)
dfloop['Tipo'] = 'Estado'
        dfloop.loc[dfloop['UF'].isin(['RegN', 'RegL', 'RegC', 'RegS']), 'Tipo'] = 'Regional'
        dfloop.loc[dfloop['UF'].isin(['N', 'S', 'CO', 'SE', 'NE']), 'Tipo'] = 'Região'
        dfloop.loc[dfloop['UF'] == 'BR', 'Tipo'] = 'País'
dfreport.to_csv('./mem-data/%s-mem-report-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]), index=False)
dfreport.to_csv('../clean_data/%smem-report.csv' % out_pref, index=False)
dfreport_cases[['Unidade da Federação', 'Tipo']] = dfreport[['Unidade da Federação', 'Tipo']]
dfreport_cases.to_csv('./mem-data/%s-mem-report_cases-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]),
index=False)
dfreport_cases.to_csv('../clean_data/%smem-report_cases.csv' % out_pref, index=False)
dfcorredor.to_csv('./mem-data/%s-mem-typical-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]), index=False)
dfcorredor.to_csv('../clean_data/%smem-typical.csv' % out_pref, index=False)
dfcorredor_cases[['Unidade da Federação', 'Tipo']] = dfcorredor[['Unidade da Federação', 'Tipo']]
dfcorredor_cases.to_csv('./mem-data/%s-mem-typical_cases-%s-method.csv' % (pref, wdw_method_lbl[wdw_method]),
index=False)
dfcorredor_cases.to_csv('../clean_data/%smem-typical_cases.csv' % out_pref, index=False)
module_logger.info('MEM calculation outcome:\n - SUCCESS: %(SUCCESS)s\n - FAILED: %(FAILED)s' % mem_calc)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Generate MEM analysis from cleaned SINAN-SRAG data,\n" +
"for specified Federal Units, if any. If none specified, runs for all.\n" +
"Example usage:\n" +
"python3 sinan-mem-inset-thresholds.py --path clean_data4mem-incidence.csv " +
"--plot False --uflist Aw Cf\n",
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('--path', help='Path to data file')
parser.add_argument('--sep', help='Column separator', default=',')
parser.add_argument('--plot', help='Plot curves', default=False)
parser.add_argument('--uflist', nargs='*', default='all')
args = parser.parse_args()
args.plot = to_bool(args.plot)
print(args)
main(fname=args.path, plot_curves=bool(args.plot), sep=args.sep, uflist=args.uflist)
| gpl-3.0 | -6,955,606,979,555,097,000 | 51.231039 | 129 | 0.583441 | false | 3.04462 | false | false | false |
Kuniwak/vint | vint/bootstrap.py | 1 | 1382 | import importlib
import pkgutil
from pathlib import Path
from vint.linting.cli import start_cli
import logging
LOG_FORMAT = 'vint %(levelname)s: %(message)s'
def init_logger():
logging.basicConfig(format=LOG_FORMAT)
def init_linter():
import_all_policies()
def init_cli():
start_cli()
def import_all_policies():
""" Import all policies that were registered by vint.linting.policy_registry.
Dynamic policy importing is comprised of the 3 steps
1. Try to import all policy modules (then we can't know what policies exist)
2. In policy module, register itself by using vint.linting.policy_registry
3. After all policies registered by itself, we can get policy classes
"""
pkg_name = _get_policy_package_name_for_test()
pkg_path_list = pkg_name.split('.')
pkg_path = str(Path(_get_vint_root(), *pkg_path_list).resolve())
for _, module_name, is_pkg in pkgutil.iter_modules([pkg_path]):
if not is_pkg:
module_fqn = pkg_name + '.' + module_name
logging.debug('Loading the policy module: `{fqn}`'.format(fqn=module_fqn))
importlib.import_module(module_fqn)
def _get_vint_root():
return Path(__file__).parent.parent
def _get_policy_package_name_for_test():
""" Test hook method that returns a package name for policy modules. """
return 'vint.linting.policy'
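# Typical bootstrap order (sketch, not part of the original module): configure
# logging first, load every policy module, then hand control to the CLI.
#
#     init_logger()
#     init_linter()
#     init_cli()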
| mit | 1,752,616,255,235,649,500 | 26.64 | 86 | 0.675109 | false | 3.580311 | false | false | false |
AdamRTomkins/libSpineML | libSpineML/smlBundle.py | 1 | 7712 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""SpineML Bundle Module
This module provides a convenience class to bundle together related SpineML
objects into a single standard object which can be easily passed between
programs. The bundle is able to interact with premade SpineML objects
through the other support classes, or to parse directly from XML.
TODO:
## export all as a loop through
## export each element, as a pass through
## import a project file
"""
import os
import pdb
import tempfile
import smlExperiment # SpineML layer classes
import smlNetwork
import smlComponent
class Bundle(object):
"""Bundle instances are a container class for the various spineML specifications.
Each specification is stored a list of objects.
"""
def __init__(self, experiments=None, networks=None, components=None,project_dict=None):
self.experiments = []
self.components = []
self.networks = []
self.index = {}
if type(experiments) is not type(None):
if type(experiments) is smlExperiment.SpineMLType:
self.experiments.append(experiments)
elif type(experiments) is list:
for e in experiments:
if type(e) is not smlExperiment.SpineMLType:
raise TypeError('Invalid Experiment Input: %s' % str(type(e)))
else:
self.experiments.append(e)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiments)))
if type(networks) is not type(None):
if type(networks) is smlNetwork.SpineMLType:
self.networks.append(networks)
elif type(networks) is list:
for n in networks:
if type(n) is not smlNetwork.SpineMLType:
raise TypeError('Invalid Network Input: %s' % str(type(n)))
else:
self.networks.append(n)
else:
raise TypeError('Invalid Network Input: %s' % str(type(networks)))
if type(components) is not type(None):
if type(components) is smlComponent.SpineMLType:
self.components.append(components)
elif type(components) is list:
for c in components:
if type(c) is not smlComponent.SpineMLType:
raise TypeError('Invalid Component Input: %s' % str(type(c)))
else:
self.components.append(c)
else:
raise TypeError('Invalid Component Input: %s' % str(type(components)))
if type(project_dict) is not type(None):
assert 'experiment' in project_dict
assert 'network' in project_dict
assert 'components' in project_dict
# set experiment
            # eg: 'experiment': ('experiment0.xml', '<xml content>')
print project_dict['experiment']
experiment_file, experiment_xml = project_dict['experiment']
with tempfile.NamedTemporaryFile() as temp:
temp.write(experiment_xml)
temp.flush()
temp.seek(0)
exp_obj = smlExperiment.parse(temp,True)
self.experiments.append(exp_obj)
# build up the experiment index
self.index[experiment_file] = {}
self.index[experiment_file]['experiment'] = {experiment_file:exp_obj}
# set network
# eg: 'network':('model.xml','<xml content>')
network_file, network_xml = project_dict['network']
with tempfile.NamedTemporaryFile() as temp:
temp.write(network_xml)
temp.flush()
temp.seek(0)
net_obj = smlNetwork.parse(temp,True)
self.networks.append(net_obj)
self.index[experiment_file]['network'] = {}
self.index[experiment_file]['network'][network_file] = net_obj
# set components
for component_file,component_xml in project_dict['components']:
with tempfile.NamedTemporaryFile() as temp:
temp.write(component_xml)
temp.flush()
temp.seek(0)
comp_obj = smlComponent.parse(temp,True)
self.components.append(comp_obj)
self.index[experiment_file]['component'] = {}
self.index[experiment_file]['component'][component_file] = comp_obj
def add_experiment(self, experiment,recursive=False):
"""Add a SpineML Experiment stored as SpineMLType types, to the bundle
Setting recursive=True will enable the experiment to add further subcomponents
which it accesses, such as the network file and the component file.
Adding an experiment using the recursive option also builds an index, which
may provide a more organic structure
"""
if type(experiment) is smlExperiment.SpineMLType:
self.experiments.append(experiment)
elif type(experiment) is str:
exp_obj = smlExperiment.parse(experiment,True)
self.experiments.append(exp_obj)
exp_file = os.path.basename(experiment)
# build up the experiment index
self.index[exp_file] = {}
self.index[exp_file]['experiment'] = {exp_file:exp_obj}
if recursive:
# Add the linked model files if recursive is set to true.
path = os.path.dirname(experiment) + '/'
if path == '/':
path = ''
for e in exp_obj.Experiment:
self.add_network(path+e.Model.network_layer_url,True,exp_file)
else:
raise TypeError('Invalid Experiment Input: %s' % str(type(experiment)))
def add_network(self, network,recursive=False,index=None):
"""Add a SpineML Network stored as a SpineMLType, to the bundle
When building an index recursively, pass the experiment file name as the index
"""
if type(network) is smlNetwork.SpineMLType:
self.networks.append(network)
elif type(network) is str:
net_file = os.path.basename(network)
path = os.path.dirname(network) + '/'
if path == '/':
path = ''
net_obj = smlNetwork.parse(network,True)
self.networks.append(net_obj)
if recursive:
if index is not None:
self.index[index]['network'] = {net_file:net_obj}
# Add the linked component files if recursive is set to true
for n in net_obj.Population:
self.add_component(smlComponent.parse(path + n.Neuron.url,True))
if index is not None:
self.index[index]['component'] = {n.Neuron.url:self.components[-1]}
else:
raise TypeError('Invalid Network Input %s' % str(type(network)))
def add_component(self, component):
"""Add a SpineML Component of SpineMLType type to the bundle
"""
if type(component) is smlComponent.SpineMLType:
self.components.append(component)
elif type(component) is str:
self.components.append(smlComponent.parse(component,True))
else:
raise TypeError('Invalid Component Input %s' % str(type(component)))
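# Minimal usage sketch (not part of the original module). The file name below is
# a placeholder; it assumes an experiment XML whose network and component files
# sit next to it, so the recursive flag can pull them in as well.
if __name__ == '__main__':
    example_bundle = Bundle()
    example_bundle.add_experiment('experiment0.xml', recursive=True)
    print len(example_bundle.experiments), len(example_bundle.networks), len(example_bundle.components)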
| gpl-3.0 | -8,174,739,726,877,663,000 | 37.56 | 91 | 0.566131 | false | 4.53114 | false | false | false |
hbp-brain-charting/public_protocols | mtt/paradigm_descriptors/paradigm_descriptor_mtt.py | 1 | 8624 | # -*- coding: utf-8 -*-
"""
Script for paradigm descriptors' extraction on the Mental-Time-Travel protocol
for both models
author: Ana Luisa Pinho
e-mail: [email protected]
Last update: November 2019
Compatibility: Python 3.5
"""
import os
import glob
import csv
import numpy as np
# %%
# ========================== GENERAL PARAMETERS ===============================
REFERENCES_WE = ['lermite_observe', 'debit_reduit',
'les_animaux_broutent', 'premiere_rencontre',
'seconde_rencontre']
REFERENCES_SN = ['dolmens_sous_la_pluie', 'le_grand_pretre_observe',
'les_feux_follets_sallument', 'premier_rituel',
'second_rituel']
CUES_SPACE = ['sud_ou_nord', 'sud_ou_nord', 'ouest_ou_est', 'ouest_ou_est']
CUES_TIME = ['avant_ou_apres', 'avant_ou_apres']
# *****************************************************************************
# #######################################################
# # Island story
# island = 'we'
# # Participants' list
# participant_list = [1, 4, 5, 7, 8, 9, 12, 13, 14]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 2
# #######################################################
'''
Exceptions for IBC participants of island "we":
Participant: input_no, first_sess, last_sess
sub-06: 0, 0, 0
sub-06: 1, 1, 2
sub-11: 0, 0, 1
sub-11: 1, 2, 2
sub-15: 0, 0, 0 (very incomplete)
sub-15: 1, 1, 2
'''
# # Island story
# island = 'we'
# # Participants' list
# # participant_list = [6]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 0
# #######################################################
# # Island story
# island = 'sn'
# # Participants' list
# participant_list = [1, 4, 5, 6, 7, 9, 11, 12, 13, 14]
# # Which file to load? (numbering starts from 0)
# input_no = 0
# # Sessions's ID (numbering starts from 0)
# first_sess = 0
# last_sess = 2
'''
Exceptions for IBC participants of island "sn":
sub-15: no runs
'''
# #######################################################
# *****************************************************************************
# #### DEFINE PATHWAYS ####
# Parent directory
main_dir = '../../../../analysis_pipeline/ibc_main/neurospin_data/info'
# Subject folder
# fname_prefix = 'pilot'
fname_prefix = 'sub'
# Name of the task protocol
protocol = 'mtt'
# fname of folder with log_files
raw_fname = 'log_' + island
# %%
# ============================== FUNCTIONS ====================================
def create_new_dir(dir_path):
"""
Creates directory of output files
"""
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def load_log_file(input_dir, prefix, subject, task, logdir, no):
"""
Load the log files
"""
filename_participant_id = prefix + "-" + "%02d" % subject
# Set the pathway of the input files
inputs_path = os.path.join(input_dir, filename_participant_id, task,
logdir)
inputs = glob.glob(os.path.join(inputs_path, "*.xpd"))
inputs.sort()
fname = inputs[no]
# Load the file
inlist = []
inlist = [line for line in csv.reader(open(fname), delimiter=',')]
return inlist
def stack_descriptors(onsets, durations, names):
"""
Create table of paradigm descriptors
"""
# Headers of the paradigm descriptors' files according to BIDS
header = ['onset', 'duration', 'trial_type']
table = np.vstack((header, np.vstack((onsets, durations, names)).T))
return table
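# For example (illustrative values only), stack_descriptors([0.0, 2.5], [2.0, 1.5],
# ['cond_A', 'cond_B']) yields a 3x3 array whose first row is the BIDS header
# ['onset', 'duration', 'trial_type'], followed by one row per trial.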
def save_output(file_path, liste):
"""
Save output file
"""
with open(file_path, 'w') as fp:
a = csv.writer(fp, delimiter='\t')
a.writerows(liste)
# %%
# ============================== PARSER =======================================
# %%
# Create a file for each participant and ...
for participant in participant_list:
# Clean or create output folders
path1 = os.path.join(main_dir, fname_prefix + '-' + '%02d' % participant,
protocol, 'absolute_model_' + island)
path2 = os.path.join(main_dir, fname_prefix + '-' + '%02d' % participant,
protocol, 'relative_model_' + island)
create_new_dir(path1)
create_new_dir(path2)
# Load input files
input_list = load_log_file(main_dir, fname_prefix, participant, protocol,
raw_fname, input_no)
# Parse the necessary information
for r, row in enumerate(input_list):
if row[0] == str(participant):
break
else:
continue
input_list = input_list[r:]
# Create a list of sessions' list
data_list = []
length = 0
for b, block in enumerate(np.arange(first_sess, last_sess + 1)):
data_block = []
idx = b * length
for dl, line in enumerate(input_list[idx:]):
if line[1] == str(block):
data_block.append(line)
else:
length = dl
break
data_list.append(data_block)
continue
# ... for every block
for n, data in enumerate(data_list):
# Read the table
onset = []
duration = []
name_abs = []
name_relat = []
for datum in data:
if participant == 15 and datum[1] == '0' and datum[2] != '0' and \
island == 'we':
print(datum[8])
break
datum = datum[4:]
# Onsets and durations of conditions
onset.append(float(datum[5]) / 1000)
duration.append(float(datum[6]) / 1000)
# Names of conditions for both models
# Beginning of a trial
if datum[4] in REFERENCES_WE + REFERENCES_SN:
# References of relative model
name_relat.append(datum[0] + '_all_reference')
elif datum[4] in CUES_SPACE:
# References of absolute model for space
name_abs.append(datum[0] + '_' + datum[1] + '_reference')
# Space cues
name_abs.append(datum[0] + '_all_reference_space_cue')
name_relat.append(datum[0] + '_all_space_cue')
elif datum[4] in CUES_TIME:
# References of absolute model for time
name_abs.append(datum[0] + '_' + datum[2] + '_reference')
# Time cues
name_abs.append(datum[0] + '_all_reference_time_cue')
name_relat.append(datum[0] + '_all_time_cue')
elif datum[4] == 'response':
# Events of the relative model...
# ... for time
if datum[9] in ['before', 'after']:
name_abs.append(datum[0] + '_' + datum[2] + \
'_reference_' + datum[3] + '_event')
name_relat.append(datum[0] + '_' + datum[9] + '_' + \
datum[3] + '_event')
# ... for space
else:
name_abs.append(datum[0] + '_' + datum[1] + \
'_reference_' + datum[3] + '_event')
name_relat.append(datum[0] + '_' + datum[9] + 'side_' + \
datum[3] + '_event')
# Responses for both models
name_abs.append(datum[0] + '_all_reference_response')
name_relat.append(datum[0] + '_all_event_response')
# Events of the absolute model
else:
continue
# Stack onset, duration and trial_type arrays
abs_descriptors = stack_descriptors(onset, duration, name_abs)
relat_descriptors = stack_descriptors(onset, duration, name_relat)
# Output files
abs_fname = 'paradigm_descriptors_mtt_absolute-model' + '_' + \
island + '_' + fname_prefix + '-' + \
'%02d' % participant + '_run' + \
'%01d' % (n + first_sess) + '.tsv'
relat_fname = 'paradigm_descriptors_mtt_relative-model' + '_' + \
island + '_' + fname_prefix + '-' + \
'%02d' % participant + '_run' + \
'%01d' % (n + first_sess) + '.tsv'
output1 = os.path.join(path1, abs_fname)
output2 = os.path.join(path2, relat_fname)
print(output1, output2)
# Save files
save_output(output1, abs_descriptors)
save_output(output2, relat_descriptors)
| bsd-3-clause | -978,880,912,529,410,800 | 32.297297 | 79 | 0.50487 | false | 3.638819 | false | false | false |
GabrielCasarin/Allegri | Meta_Compilador/minimizador.py | 1 | 8819 | # Copyright (c) 2016 Gabriel Casarin da Silva, All Rights Reserved.
from comum.automatos import AutomatoFinito
from comum.automatos.estado import Estado, EstadoNaoDeterministico
def eliminar_transicoes_em_vazio(automato):
def epsilon_closure(estado):
fecho = [estado]
pilha = list(fecho)
while pilha:
el = pilha.pop()
if '' in el.simbolos:
for el2 in el['']:
if el2 not in fecho:
fecho.append(el2)
pilha.append(el2)
return fecho
def delta1(qi, simbolo, fecho):
D1 = []
for qj in fecho:
if simbolo in qj.simbolos:
for qk in qj[simbolo]:
for el in epsilon_closure(qk):
if el not in D1:
D1.append(el)
return D1
for Si in automato:
fecho = epsilon_closure(Si)
for simbolo in automato.alfabeto:
if simbolo != '':
D1 = delta1(Si, simbolo, fecho)
for el in D1:
Si[simbolo] = el
for Sj in fecho:
if not Si.final and Sj.final:
Si.final = True
for Si in automato:
del Si['']
def eliminar_indeterminismos(automato):
class EstadoContainer(EstadoNaoDeterministico):
def __init__(self, conjunto_estados):
            # initialize the object as an unnamed, non-final state
super(EstadoContainer, self).__init__(nome='')
            # the idea here is to find the root states of each element of conjunto_estados
self.conjunto_estados = []
for el in conjunto_estados:
if isinstance(el, EstadoContainer):
for estado in el.conjunto_estados:
if estado not in self.conjunto_estados:
self.conjunto_estados.append(estado)
elif isinstance(el, Estado):
if el not in self.conjunto_estados:
self.conjunto_estados.append(el)
self.conjunto_estados = sorted(self.conjunto_estados, key=lambda e: e.nome)
for estado in self.conjunto_estados:
self.nome += estado.nome
self.merge(estado, True)
def compara_conjunto(self, conjunto_estados):
temp = list(conjunto_estados)
for el in conjunto_estados:
if isinstance(el, EstadoContainer):
temp.remove(el)
for estado in el.conjunto_estados:
if estado not in temp:
temp.append(estado)
if len(self.conjunto_estados) == len(temp):
for el in self.conjunto_estados:
if el not in temp:
return False
return True
else:
return False
def cria_novo_estado(conjunto_estados):
"""
        creates a new state by merging two or more others
"""
novo_estado = EstadoContainer(conjunto_estados)
automato.estados[novo_estado.nome] = novo_estado
for simbolo in novo_estado.transicoes.keys():
if len(novo_estado[simbolo]) > 1:
lista_indeterminismos.append((novo_estado, simbolo))
for estado in automato:
for simbolo in estado.transicoes.keys():
if novo_estado.compara_conjunto(estado[simbolo]):
lista_indeterminismos.remove((estado, simbolo))
del estado[simbolo]
estado[simbolo] = novo_estado
def converter_para_deterministico(automato):
old_estados = automato.estados.values()
automato.deterministico = True
automato.estados = {}
for q in old_estados:
automato.add_estado(q.nome)
automato[q.nome].final = q.final
automato[q.nome].submaquinas_chamadas = q.submaquinas_chamadas
for s in q.transicoes.keys():
automato.add_estado(q[s][0].nome)
automato[q.nome][s] = automato[q[s][0].nome]
    # build an initial list of nondeterministic transitions
lista_indeterminismos = []
for estado in automato:
for simbolo in estado.transicoes.keys():
if len(estado[simbolo]) > 1:
lista_indeterminismos.append((estado, simbolo))
    # iterate over all nondeterminisms
while lista_indeterminismos:
estado, simbolo = lista_indeterminismos[0]
cria_novo_estado(estado[simbolo])
    # finally, rebuild the automaton as a deterministic one
converter_para_deterministico(automato)
def eliminar_estados_inacessiveis(automato, inicial='q0'):
estados = list(automato.estados.values())
visitados = []
pilha = [automato.estados[inicial]]
while pilha:
estadoAtual = pilha.pop()
visitados.append(estadoAtual)
for simbolo in estadoAtual.transicoes.keys():
if automato.deterministico:
proxEstado = estadoAtual[simbolo]
if (proxEstado not in visitados
and proxEstado not in pilha):
pilha.insert(0, proxEstado)
            else:  # if the automaton is not deterministic
for proxEstado in estadoAtual[simbolo]:
if (proxEstado not in visitados
and proxEstado not in pilha):
pilha.insert(0, proxEstado)
a_serem_removidos = [q.nome for q in estados if q not in visitados]
for estado in a_serem_removidos:
del automato.estados[estado]
def minimizador_de_Hopcroft(automato):
    '''Return a partition of the state set of an automaton,
    corresponding to the equivalence classes obtained with
    Hopcroft's minimization algorithm.'''
def delta_R(P, a):
conj = []
for q in automato:
if a in q.simbolos and q[a] in P:
conj.append(q)
return conj
Grupos = [[],[]]
for q in automato:
if q.final:
Grupos[1].append(q)
else:
Grupos[0].append(q)
Ativo = [list(Grupos[1])]
while Ativo:
A = Ativo.pop()
for a in automato.alfabeto:
for G in Grupos:
delta = delta_R(A, a)
# G1 = G inter delta
G1 = [x for x in G if x in delta]
# G2 = G - G1
G2 = [x for x in G if x not in G1]
if G1 and G2:
Grupos.remove(G)
Grupos.append(G1)
Grupos.append(G2)
if G in Ativo:
Ativo.remove(G)
Ativo.append(G1)
Ativo.append(G2)
else:
if len(G1) < len(G2):
Ativo.append(G1)
else:
Ativo.append(G2)
return Grupos
def particao_para_automato_finito(particao, nome=None, alfabeto=None, inicial=0, apendice=None):
def acha(nome_estado):
for i in range(len(particao)):
for estado in particao[i]:
if estado.nome == nome_estado:
return i
return None
def gera_nome(n):
return 'q' + str(n)
if apendice is None:
af = AutomatoFinito(nome=nome, estados=[gera_nome(inicial)], estadoInicial=gera_nome(inicial), alfabeto=alfabeto)
else:
af = apendice
pilha = []
finais = []
transicoes_chamada = []
i = acha('q0')
nomes_classes = {
i: gera_nome(inicial)
}
pilha.append(particao[i][0])
cont = inicial
while pilha:
estado_atual = pilha.pop()
j = acha(estado_atual.nome)
qi = nomes_classes[j]
for s, qj in estado_atual.transicoes.items():
if qj is not None:
                # find the index of the set, within the partition, to which qj belongs
i = acha(qj.nome)
if not i in nomes_classes:
cont += 1
nova_classe = gera_nome(cont)
nomes_classes[i] = nova_classe
pilha.append(particao[i][0])
af.add_estado(nova_classe)
af.add_transicao(de=qi, com=s, para=nomes_classes[i])
if s in estado_atual.submaquinas_chamadas:
transicoes_chamada.append((qi, s, nomes_classes[i]))
af[qi].final = estado_atual.final
if af[qi].final: finais.append(qi)
af[qi].submaquinas_chamadas = estado_atual.submaquinas_chamadas
if apendice is None:
return af
else:
return cont, transicoes_chamada, finais
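# End-to-end usage sketch (not part of the original module). The input automaton
# is assumed to be built elsewhere with the AutomatoFinito API used above;
# `construir_automato_exemplo` is a hypothetical placeholder for that code.
if __name__ == '__main__':
    automato = construir_automato_exemplo()  # hypothetical builder, see note above
    eliminar_transicoes_em_vazio(automato)
    eliminar_indeterminismos(automato)
    eliminar_estados_inacessiveis(automato, inicial='q0')
    particao = minimizador_de_Hopcroft(automato)
    automato_minimo = particao_para_automato_finito(particao, nome='minimo',
                                                    alfabeto=automato.alfabeto)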
| gpl-3.0 | -5,765,172,779,519,239,000 | 34.504032 | 121 | 0.535718 | false | 3.355564 | false | false | false |
3324fr/spinalcordtoolbox | scripts/isct_test_function.py | 1 | 17025 | #!/usr/bin/env python
#########################################################################################
#
# This function allows to run a function on a large dataset with a set of parameters.
# Results are extracted and saved in a way that they can easily be compared with another set.
#
# Data should be organized as the following:
# (names of images can be changed but must be passed as parameters to this function)
#
# data/
# ......subject_name_01/
# ......subject_name_02/
# .................t1/
# .........................subject_02_anything_t1.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t1.nii.gz
# .................t2/
# .........................subject_02_anything_t2.nii.gz
# .........................some_landmarks_of_vertebral_levels.nii.gz
# .........................subject_02_manual_segmentation_t2.nii.gz
# .................t2star/
# .........................subject_02_anything_t2star.nii.gz
# .........................subject_02_manual_segmentation_t2star.nii.gz
# ......subject_name_03/
# .
# .
# .
#
# ---------------------------------------------------------------------------------------
# Copyright (c) 2015 Polytechnique Montreal <www.neuro.polymtl.ca>
# Author: Sara Dupont, Benjamin De Leener
# Modified: 2015-09-30
#
# About the license: see the file LICENSE.TXT
#########################################################################################
import sys
import commands
import platform
import signal
from time import time, strftime
from msct_parser import Parser
import sct_utils as sct
import os
import copy_reg
import types
import pandas as pd
import json
# get path of the toolbox
# TODO: put it back below when working again (julien 2016-04-04)
# <<<
# OLD
# status, path_sct = commands.getstatusoutput('echo $SCT_DIR')
# NEW
path_script = os.path.dirname(__file__)
path_sct = os.path.dirname(path_script)
# >>>
# append path that contains scripts, to be able to load modules
sys.path.append(path_sct + '/scripts')
sys.path.append(path_sct + '/testing')
def _pickle_method(method):
"""
Author: Steven Bethard (author of argparse)
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
func_name = method.im_func.__name__
obj = method.im_self
cls = method.im_class
cls_name = ''
if func_name.startswith('__') and not func_name.endswith('__'):
cls_name = cls.__name__.lstrip('_')
if cls_name:
func_name = '_' + cls_name + func_name
return _unpickle_method, (func_name, obj, cls)
def _unpickle_method(func_name, obj, cls):
"""
Author: Steven Bethard
http://bytes.com/topic/python/answers/552476-why-cant-you-pickle-instancemethods
"""
for cls in cls.mro():
try:
func = cls.__dict__[func_name]
except KeyError:
pass
else:
break
return func.__get__(obj, cls)
copy_reg.pickle(types.MethodType, _pickle_method, _unpickle_method)
def generate_data_list(folder_dataset, json_requirements=None, verbose=1):
"""
Construction of the data list from the data set
    This function returns a list of directories (in folder_dataset) in which the contrast is present.
:return data:
"""
data_subjects, subjects_dir = [], []
# each directory in folder_dataset should be a directory of a subject
for subject_dir in os.listdir(folder_dataset):
if not subject_dir.startswith('.') and os.path.isdir(folder_dataset + subject_dir):
if read_json(folder_dataset + subject_dir, json_requirements=json_requirements):
data_subjects.append(folder_dataset + subject_dir + '/')
subjects_dir.append(subject_dir)
if not data_subjects:
sct.printv('ERROR: No subject data were found in ' + folder_dataset + '. '
'Please organize your data correctly or provide a correct dataset.',
verbose=verbose, type='error')
return data_subjects, subjects_dir
def read_json(path_dir, json_requirements=None, fname_json='dataset_description.json'):
path_dir = sct.slash_at_the_end(path_dir, slash=1)
if fname_json not in os.listdir(path_dir) and json_requirements is not None:
accept_subject = False
elif json_requirements is None:
accept_subject = True
else:
json_file = open(path_dir+fname_json)
dic_info = json.load(json_file)
json_file.close()
# pass keys and items to lower case
dic_info = dict((k.lower(), v.lower()) for k, v in dic_info.iteritems())
# if no condition is not verified, accept subject
accept_subject = True
# read requirements:
list_conditions = json_requirements.split(',')
for condition in list_conditions:
key, val = condition.split('=')
key, val = key.lower(), val.lower()
# if key do not exist, do not accept subject
if key not in dic_info.keys():
accept_subject = False
# if value for this key is not the one required, do not accept subject
elif dic_info[key] != val:
accept_subject = False
return accept_subject
def process_results(results, subjects_name, function, folder_dataset, parameters):
try:
results_dataframe = pd.concat([result[2] for result in results])
results_dataframe.loc[:, 'subject'] = pd.Series(subjects_name, index=results_dataframe.index)
results_dataframe.loc[:, 'script'] = pd.Series([function]*len(subjects_name), index=results_dataframe.index)
results_dataframe.loc[:, 'dataset'] = pd.Series([folder_dataset]*len(subjects_name), index=results_dataframe.index)
results_dataframe.loc[:, 'parameters'] = pd.Series([parameters] * len(subjects_name), index=results_dataframe.index)
return results_dataframe
except KeyboardInterrupt:
return 'KeyboardException'
except Exception as e:
sct.printv('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), 1, 'warning')
sct.printv(str(e), 1, 'warning')
sys.exit(2)
def function_launcher(args):
import importlib
# append local script to PYTHONPATH for import
sys.path.append(os.path.abspath(os.curdir))
script_to_be_run = importlib.import_module('test_' + args[0]) # import function as a module
try:
output = script_to_be_run.test(*args[1:])
except:
import traceback
print('%s: %s' % ('test_' + args[0], traceback.format_exc()))
# output = (1, 'ERROR: Function crashed', 'No result')
from pandas import DataFrame
status_script = 1
output_script = 'ERROR: Function crashed.'
output = (status_script, output_script, DataFrame(data={'status': int(status_script), 'output': output_script}, index=['']))
return output
# return script_to_be_run.test(*args[1:])
def init_worker():
signal.signal(signal.SIGINT, signal.SIG_IGN)
def test_function(function, folder_dataset, parameters='', nb_cpu=None, json_requirements=None, verbose=1):
"""
Run a test function on the dataset using multiprocessing and save the results
:return: results
# results are organized as the following: tuple of (status, output, DataFrame with results)
"""
# generate data list from folder containing
data_subjects, subjects_name = generate_data_list(folder_dataset, json_requirements=json_requirements)
# All scripts that are using multithreading with ITK must not use it when using multiprocessing on several subjects
os.environ["ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS"] = "1"
from multiprocessing import Pool
# create datasets with parameters
import itertools
data_and_params = itertools.izip(itertools.repeat(function), data_subjects, itertools.repeat(parameters))
pool = Pool(processes=nb_cpu, initializer=init_worker)
try:
async_results = pool.map_async(function_launcher, data_and_params).get(9999999)
# results = process_results(async_results.get(9999999), subjects_name, function, folder_dataset, parameters) # get the sorted results once all jobs are finished
pool.close()
pool.join() # waiting for all the jobs to be done
results = process_results(async_results, subjects_name, function, folder_dataset, parameters) # get the sorted results once all jobs are finished
except KeyboardInterrupt:
print "\nWarning: Caught KeyboardInterrupt, terminating workers"
pool.terminate()
pool.join()
# return
# raise KeyboardInterrupt
# sys.exit(2)
except Exception as e:
sct.printv('Error on line {}'.format(sys.exc_info()[-1].tb_lineno), 1, 'warning')
sct.printv(str(e), 1, 'warning')
pool.terminate()
pool.join()
# raise Exception
# sys.exit(2)
return results
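# Direct-call sketch (not part of the original script; the tested function name,
# dataset path and CPU count are placeholders):
#
#     results = test_function('sct_propseg', 'dataset_full/', parameters='', nb_cpu=4)
#     n_passed = results.status[results.status == 0].count()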
def get_parser():
# Initialize parser
parser = Parser(__file__)
# Mandatory arguments
parser.usage.set_description("")
parser.add_option(name="-f",
type_value="str",
description="Function to test.",
mandatory=True,
example="sct_propseg")
parser.add_option(name="-d",
type_value="folder",
description="Dataset directory.",
mandatory=True,
example="dataset_full/")
parser.add_option(name="-p",
type_value="str",
description="Arguments to pass to the function that is tested. Please put double-quotes if there are spaces in the list of parameters.\n"
"Image paths must be contains in the arguments list.",
mandatory=False)
parser.add_option(name="-json",
type_value="str",
description="Requirements on center, study, ... that must be satisfied by the json file of each tested subjects\n"
"Syntax: center=unf,study=errsm,gm_model=0",
mandatory=False)
parser.add_option(name="-cpu-nb",
type_value="int",
description="Number of CPU used for testing. 0: no multiprocessing. If not provided, "
"it uses all the available cores.",
mandatory=False,
default_value=0,
example='42')
parser.add_option(name="-log",
type_value='multiple_choice',
description="Redirects Terminal verbose to log file.",
mandatory=False,
example=['0', '1'],
default_value='1')
parser.add_option(name="-v",
type_value="multiple_choice",
description="Verbose. 0: nothing, 1: basic, 2: extended.",
mandatory=False,
example=['0', '1', '2'],
default_value='1')
return parser
# ====================================================================================================
# Start program
# ====================================================================================================
if __name__ == "__main__":
# get parameters
parser = get_parser()
arguments = parser.parse(sys.argv[1:])
function_to_test = arguments["-f"]
dataset = arguments["-d"]
dataset = sct.slash_at_the_end(dataset, slash=1)
parameters = ''
if "-p" in arguments:
parameters = arguments["-p"]
json_requirements = None
if "-json" in arguments:
json_requirements = arguments["-json"]
nb_cpu = None
if "-cpu-nb" in arguments:
nb_cpu = arguments["-cpu-nb"]
create_log = int(arguments['-log'])
verbose = arguments["-v"]
# start timer
start_time = time()
# create single time variable for output names
output_time = strftime("%y%m%d%H%M%S")
print 'Testing started on: '+strftime("%Y-%m-%d %H:%M:%S")
# build log file name
if create_log:
file_log = 'results_test_'+function_to_test+'_'+output_time
orig_stdout = sys.stdout
fname_log = file_log+'.log'
handle_log = file(fname_log, 'w')
# redirect to log file
sys.stdout = handle_log
print 'Testing started on: '+strftime("%Y-%m-%d %H:%M:%S")
# get path of the toolbox
path_script = os.path.dirname(__file__)
path_sct = os.path.dirname(path_script)
# fetch true commit number and branch (do not use commit.txt which is wrong)
path_curr = os.path.abspath(os.curdir)
os.chdir(path_sct)
sct_commit = commands.getoutput('git rev-parse HEAD')
if not sct_commit.isalnum():
print 'WARNING: Cannot retrieve SCT commit'
sct_commit = 'unknown'
sct_branch = 'unknown'
else:
sct_branch = commands.getoutput('git branch --contains '+sct_commit).strip('* ')
# with open (path_sct+"/version.txt", "r") as myfile:
# version_sct = myfile.read().replace('\n', '')
# with open (path_sct+"/commit.txt", "r") as myfile:
# commit_sct = myfile.read().replace('\n', '')
print 'SCT commit/branch: '+sct_commit+'/'+sct_branch
os.chdir(path_curr)
# check OS
platform_running = sys.platform
if (platform_running.find('darwin') != -1):
os_running = 'osx'
elif (platform_running.find('linux') != -1):
        os_running = 'linux'
    else:
        os_running = 'unknown'
    print 'OS: '+os_running+' ('+platform.platform()+')'
# check hostname
print 'Hostname:', platform.node()
# Check number of CPU cores
from multiprocessing import cpu_count
# status, output = sct.run('echo $ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS', 0)
print 'CPU cores: ' + str(cpu_count()) # + ', Used by SCT: '+output
# check RAM
sct.checkRAM(os_running, 0)
# test function
try:
results = test_function(function_to_test, dataset, parameters, nb_cpu, json_requirements, verbose)
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
results_subset = results.drop('script', 1).drop('dataset', 1).drop('parameters', 1).drop('output', 1)
results_display = results_subset
# save panda structure
if create_log:
results_subset.to_pickle(file_log+'.pickle')
# mean
results_mean = results_subset[results_subset.status != 200].mean(numeric_only=True)
results_mean['subject'] = 'Mean'
results_mean.set_value('status', float('NaN')) # set status to NaN
# results_display = results_display.append(results_mean, ignore_index=True)
# std
results_std = results_subset[results_subset.status != 200].std(numeric_only=True)
results_std['subject'] = 'STD'
results_std.set_value('status', float('NaN')) # set status to NaN
# results_display = results_display.append(results_std, ignore_index=True)
# count tests that passed
count_passed = results_subset.status[results_subset.status == 0].count()
count_crashed = results_subset.status[results_subset.status == 1].count()
# count tests that ran
count_ran = results_subset.status[results_subset.status != 200].count()
# results_display = results_display.set_index('subject')
# jcohenadad, 2015-10-27: added .reset_index() for better visual clarity
results_display = results_display.set_index('subject').reset_index()
        print '\nCommand: "' + function_to_test + ' ' + parameters + '"'
print 'Dataset: ' + dataset
# display general results
print '\nGLOBAL RESULTS:'
elapsed_time = time() - start_time
print 'Duration: ' + str(int(round(elapsed_time)))+'s'
# display results
print 'Passed: ' + str(count_passed) + '/' + str(count_ran)
print 'Crashed: ' + str(count_crashed) + '/' + str(count_ran)
# build mean/std entries
dict_mean = results_mean.to_dict()
dict_mean.pop('status')
dict_mean.pop('subject')
print 'Mean: ' + str(dict_mean)
dict_std = results_std.to_dict()
dict_std.pop('status')
dict_std.pop('subject')
print 'STD: ' + str(dict_std)
# print detailed results
print '\nDETAILED RESULTS:'
print results_display.to_string()
print 'Status: 0: Passed | 1: Crashed | 99: Failed | 200: Input file(s) missing | 201: Ground-truth file(s) missing'
except Exception as err:
print err
# stop file redirection
if create_log:
sys.stdout.close()
sys.stdout = orig_stdout
# display log file to Terminal
handle_log = file(fname_log, 'r')
print handle_log.read()
| mit | 3,687,924,555,566,886,400 | 38.228111 | 169 | 0.589662 | false | 3.920101 | true | false | false |
SerpentCS/purchase-workflow | purchase_request/models/purchase_request.py | 1 | 10281 | # -*- coding: utf-8 -*-
# Copyright 2016 Eficent Business and IT Consulting Services S.L.
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl-3.0).
from openerp import api, fields, models
import openerp.addons.decimal_precision as dp
_STATES = [
('draft', 'Draft'),
('to_approve', 'To be approved'),
('approved', 'Approved'),
('rejected', 'Rejected')
]
class PurchaseRequest(models.Model):
_name = 'purchase.request'
_description = 'Purchase Request'
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.model
def _company_get(self):
company_id = self.env['res.company']._company_default_get(self._name)
return self.env['res.company'].browse(company_id.id)
@api.model
def _get_default_requested_by(self):
return self.env['res.users'].browse(self.env.uid)
@api.model
def _get_default_name(self):
return self.env['ir.sequence'].get('purchase.request')
@api.model
def _default_picking_type(self):
type_obj = self.env['stock.picking.type']
company_id = self.env.context.get('company_id') or \
self.env.user.company_id.id
types = type_obj.search([('code', '=', 'incoming'),
('warehouse_id.company_id', '=', company_id)])
if not types:
types = type_obj.search([('code', '=', 'incoming'),
('warehouse_id', '=', False)])
return types[:1]
@api.multi
@api.depends('state')
def _compute_is_editable(self):
for rec in self:
if rec.state in ('to_approve', 'approved', 'rejected'):
rec.is_editable = False
else:
rec.is_editable = True
@api.multi
def _track_subtype(self, init_values):
for rec in self:
if 'state' in init_values and rec.state == 'to_approve':
return 'purchase_request.mt_request_to_approve'
elif 'state' in init_values and rec.state == 'approved':
return 'purchase_request.mt_request_approved'
elif 'state' in init_values and rec.state == 'rejected':
return 'purchase_request.mt_request_rejected'
return super(PurchaseRequest, self)._track_subtype(init_values)
name = fields.Char('Request Reference', size=32, required=True,
default=_get_default_name,
track_visibility='onchange')
origin = fields.Char('Source Document', size=32)
date_start = fields.Date('Creation date',
help="Date when the user initiated the "
"request.",
default=fields.Date.context_today,
track_visibility='onchange')
requested_by = fields.Many2one('res.users',
'Requested by',
required=True,
track_visibility='onchange',
default=_get_default_requested_by)
assigned_to = fields.Many2one('res.users', 'Approver',
track_visibility='onchange')
description = fields.Text('Description')
company_id = fields.Many2one('res.company', 'Company',
required=True,
default=_company_get,
track_visibility='onchange')
line_ids = fields.One2many('purchase.request.line', 'request_id',
'Products to Purchase',
readonly=False,
copy=True,
track_visibility='onchange')
state = fields.Selection(selection=_STATES,
string='Status',
index=True,
track_visibility='onchange',
required=True,
copy=False,
default='draft')
is_editable = fields.Boolean(string="Is editable",
compute="_compute_is_editable",
readonly=True)
picking_type_id = fields.Many2one('stock.picking.type',
'Picking Type', required=True,
default=_default_picking_type)
@api.multi
def copy(self, default=None):
default = dict(default or {})
self.ensure_one()
default.update({
'state': 'draft',
'name': self.env['ir.sequence'].get('purchase.request'),
})
return super(PurchaseRequest, self).copy(default)
@api.model
def create(self, vals):
request = super(PurchaseRequest, self).create(vals)
if vals.get('assigned_to'):
request.message_subscribe_users(user_ids=[request.assigned_to.id])
return request
@api.multi
def write(self, vals):
res = super(PurchaseRequest, self).write(vals)
for request in self:
if vals.get('assigned_to'):
self.message_subscribe_users(user_ids=[request.assigned_to.id])
return res
@api.multi
def button_draft(self):
for rec in self:
rec.state = 'draft'
return True
@api.multi
def button_to_approve(self):
for rec in self:
rec.state = 'to_approve'
return True
@api.multi
def button_approved(self):
for rec in self:
rec.state = 'approved'
return True
@api.multi
def button_rejected(self):
for rec in self:
rec.state = 'rejected'
return True
class PurchaseRequestLine(models.Model):
_name = "purchase.request.line"
_description = "Purchase Request Line"
_inherit = ['mail.thread', 'ir.needaction_mixin']
@api.multi
@api.depends('product_id', 'name', 'product_uom_id', 'product_qty',
'analytic_account_id', 'date_required', 'specifications')
def _compute_is_editable(self):
for rec in self:
if rec.request_id.state in ('to_approve', 'approved', 'rejected'):
rec.is_editable = False
else:
rec.is_editable = True
@api.multi
def _compute_supplier_id(self):
for rec in self:
if rec.product_id:
if rec.product_id.seller_ids:
rec.supplier_id = rec.product_id.seller_ids[0].name
product_id = fields.Many2one(
'product.product', 'Product',
domain=[('purchase_ok', '=', True)],
track_visibility='onchange')
name = fields.Char('Description', size=256,
track_visibility='onchange')
product_uom_id = fields.Many2one('product.uom', 'Product Unit of Measure',
track_visibility='onchange')
product_qty = fields.Float('Quantity', track_visibility='onchange',
digits_compute=dp.get_precision(
'Product Unit of Measure'))
request_id = fields.Many2one('purchase.request',
'Purchase Request',
ondelete='cascade', readonly=True)
company_id = fields.Many2one('res.company',
related='request_id.company_id',
string='Company',
store=True, readonly=True)
analytic_account_id = fields.Many2one('account.analytic.account',
'Analytic Account',
track_visibility='onchange')
requested_by = fields.Many2one('res.users',
related='request_id.requested_by',
string='Requested by',
store=True, readonly=True)
assigned_to = fields.Many2one('res.users',
related='request_id.assigned_to',
string='Assigned to',
store=True, readonly=True)
date_start = fields.Date(related='request_id.date_start',
string='Request Date', readonly=True,
store=True)
description = fields.Text(related='request_id.description',
string='Description', readonly=True,
store=True)
origin = fields.Char(related='request_id.origin',
size=32, string='Source Document', readonly=True,
store=True)
date_required = fields.Date(string='Request Date', required=True,
track_visibility='onchange',
default=fields.Date.context_today)
is_editable = fields.Boolean(string='Is editable',
compute="_compute_is_editable",
readonly=True)
specifications = fields.Text(string='Specifications')
request_state = fields.Selection(string='Request state',
readonly=True,
related='request_id.state',
selection=_STATES,
store=True)
supplier_id = fields.Many2one('res.partner',
string='Preferred supplier',
compute="_compute_supplier_id")
procurement_id = fields.Many2one('procurement.order',
'Procurement Order',
readonly=True)
@api.onchange('product_id')
def onchange_product_id(self):
if self.product_id:
name = self.product_id.name
if self.product_id.code:
name = '[%s] %s' % (name, self.product_id.code)
if self.product_id.description_purchase:
name += '\n' + self.product_id.description_purchase
self.product_uom_id = self.product_id.uom_id.id
self.product_qty = 1
self.name = name
| agpl-3.0 | -740,625,282,075,647,100 | 40.289157 | 79 | 0.508803 | false | 4.616524 | false | false | false |
elzaggo/pydoop | pydoop/__init__.py | 1 | 5094 | # BEGIN_COPYRIGHT
#
# Copyright 2009-2018 CRS4.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy
# of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# END_COPYRIGHT
# DEV NOTE: some of the variables defined here (docstring included)
# are parsed by setup.py, check it before modifying them.
"""
Pydoop: a Python MapReduce and HDFS API for Hadoop
--------------------------------------------------
Pydoop is a Python interface to Hadoop that allows you to write
MapReduce applications and interact with HDFS in pure Python.
"""
import os
import errno
from importlib import import_module
import pydoop.hadoop_utils as hu
from pydoop.utils.py3compat import configparser, parser_read
try:
from pydoop.version import version as __version__
except ImportError: # should only happen at compile time
__version__ = None
_PATH_FINDER = hu.PathFinder()
_HADOOP_INFO = _PATH_FINDER.find() # fill the cache ASAP
__author__ = ", ".join((
"Simone Leo",
"Gianluigi Zanetti",
"Luca Pireddu",
"Francesco Cabras",
"Mauro Del Rio",
"Marco Enrico Piras",
))
__author_email__ = ", ".join((
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
"<[email protected]>",
))
__url__ = "http://crs4.github.io/pydoop"
__propfile_basename__ = "pydoop.properties"
def reset():
_PATH_FINDER.reset()
def hadoop_home():
return _PATH_FINDER.hadoop_home()
def hadoop_exec(hadoop_home=None):
return _PATH_FINDER.hadoop_exec(hadoop_home)
def mapred_exec(hadoop_home=None):
return _PATH_FINDER.mapred_exec(hadoop_home)
def hadoop_version(hadoop_home=None):
return _PATH_FINDER.hadoop_version(hadoop_home)
def hadoop_version_info(hadoop_home=None):
return _PATH_FINDER.hadoop_version_info(hadoop_home)
def has_mrv2(hadoop_home=None):
return _PATH_FINDER.hadoop_version_info(hadoop_home).has_mrv2()
def is_apache(hadoop_home=None):
return _PATH_FINDER.is_apache(hadoop_home)
def is_cloudera(hadoop_home=None):
return _PATH_FINDER.is_cloudera(hadoop_home)
def is_hortonworks(hadoop_home=None):
return _PATH_FINDER.is_hortonworks(hadoop_home)
def hadoop_conf(hadoop_home=None):
return _PATH_FINDER.hadoop_conf(hadoop_home)
def hadoop_params(hadoop_conf=None, hadoop_home=None):
return _PATH_FINDER.hadoop_params(hadoop_conf, hadoop_home)
def hadoop_native(hadoop_home=None):
return _PATH_FINDER.hadoop_native(hadoop_home)
def hadoop_classpath(hadoop_home=None):
return _PATH_FINDER.hadoop_classpath(hadoop_home)
def package_dir():
return os.path.dirname(os.path.abspath(__file__))
##############################
# Since Pydoop 1.0, we've stopped supporting installations for multiple
# Hadoop versions, so there is only a single module and the following
# functions now return the same value regardless of the Hadoop version.
##############################
def jar_name(hadoop_vinfo=None):
return "pydoop.jar"
def jar_path(hadoop_vinfo=None):
path = os.path.join(package_dir(), jar_name())
if os.path.exists(path):
return path
else:
return None
def complete_mod_name(module, hadoop_vinfo=None):
return "%s.%s" % (__package__, module)
def import_version_specific_module(name):
return import_module(name)
# --- get properties ---
PROP_FN = os.path.join(
os.path.dirname(os.path.abspath(__file__)), __propfile_basename__
)
# http://stackoverflow.com/questions/2819696
class AddSectionWrapper(object):
SEC_NAME = 'dummy'
def __init__(self, f):
self.f = f
self.sechead = '[dummy]' + os.linesep
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
def readline(self):
if self.sechead:
try:
return self.sechead
finally:
self.sechead = None
else:
return self.f.readline()
def read_properties(fname):
parser = configparser.SafeConfigParser()
parser.optionxform = str # preserve key case
try:
with open(fname) as f:
parser_read(parser, AddSectionWrapper(f))
except IOError as e:
if e.errno != errno.ENOENT:
raise
return None # compile time, prop file is not there
return dict(parser.items(AddSectionWrapper.SEC_NAME))
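# Illustrative use of read_properties() above (a sketch, not part of the public
# API; the key name below is an assumption -- actual keys depend on the file):
#   props = read_properties(PROP_FN)
#   if props is not None:
#       version = props.get('version')
#   # read_properties() returns None when the property file is missing.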
class LocalModeNotSupported(RuntimeError):
def __init__(self):
msg = 'ERROR: Hadoop is configured to run in local mode'
super(LocalModeNotSupported, self).__init__(msg)
| apache-2.0 | 339,911,344,888,792,400 | 24.59799 | 77 | 0.665096 | false | 3.398266 | false | false | false |
mckinseyacademy/xblock-diagnosticfeedback | diagnostic_feedback/mixins.py | 1 | 3760 | from __future__ import absolute_import
import pkg_resources
from django import utils
from xblockutils.resources import ResourceLoader
from .config import student_assets, studio_assets
loader = ResourceLoader(__name__)
class XBlockWithTranslationServiceMixin(object):
"""
Mixin providing access to i18n service
"""
def _(self, text):
""" Translate text """
# noinspection PyUnresolvedReferences
return self.runtime.service(self, "i18n").ugettext(text)
class ResourceMixin(object):
"""
    Contains methods to load CSS/JS/HTML resources for the student and studio views.
"""
def sort_resources_by_order(self, lst):
return sorted(lst, key=lambda x: x[1])
@staticmethod
def resource_string(path):
"""Handy helper for getting resources."""
data = pkg_resources.resource_string(__name__, path)
return data.decode("utf8")
def get_translation_content(self):
"""
Returns JS content containing translations for user's language.
"""
try:
return self.resource_string('public/js/translations/{lang}/textjs.js'.format(
lang=utils.translation.to_locale(utils.translation.get_language()),
))
except IOError:
return self.resource_string('public/js/translations/en/textjs.js')
@property
def i18n_service(self):
""" Obtains translation service """
return self.runtime.service(self, "i18n")
def add_templates(self, fragment, context, view):
# add templates in html fragment for studio/student view
templates = self.sort_resources_by_order(student_assets.get('templates', [])
if view == 'student' else studio_assets.get('templates', [])
)
for template_obj in templates:
template = template_obj[0]
fragment.add_content(loader.render_django_template(template, context, i18n_service=self.i18n_service))
fragment.add_javascript(self.get_translation_content())
def add_css(self, fragment, view):
# add css in fragment for studio/student view
css_resources = self.sort_resources_by_order(student_assets.get('css', [])
if view == 'student' else studio_assets.get('css', [])
)
for css_obj in css_resources:
css = css_obj[0]
if css.startswith('http'):
fragment.add_css_url(css)
else:
fragment.add_css_url(self.runtime.local_resource_url(self, css))
def add_js(self, fragment, view):
# add css in fragment for studio/student view
js_resources = self.sort_resources_by_order(student_assets.get('js', [])
if view == 'student' else studio_assets.get('js', [])
)
for js_obj in js_resources:
js = js_obj[0]
if js.startswith('http'):
fragment.add_javascript_url(js)
else:
fragment.add_javascript_url(self.runtime.local_resource_url(self, js))
def initialize_js_classes(self, fragment, view, json_args):
# initialize js
js_classes = self.sort_resources_by_order(student_assets.get('js_classes', [])
if view == 'student' else studio_assets.get('js_classes', [])
)
for _class_obj in js_classes:
_class = _class_obj[0]
fragment.initialize_js(_class, json_args)
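    # Illustrative use from an XBlock view (a hedged sketch; the Fragment class
    # and view wiring are assumptions, not provided by this mixin):
    #
    #   def student_view(self, context=None):
    #       fragment = Fragment()
    #       self.add_css(fragment, 'student')
    #       self.add_js(fragment, 'student')
    #       self.add_templates(fragment, context or {}, 'student')
    #       self.initialize_js_classes(fragment, 'student', json_args={})
    #       return fragment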
| agpl-3.0 | -4,865,101,097,367,604,000 | 37.367347 | 114 | 0.5625 | false | 4.413146 | false | false | false |
CStaich/Repository01 | RPG.py | 1 | 11997 | import random
from math import ceil
import simpy #for simulating battle
#Program written by Charlie Staich
# [email protected]
# in fulfillment of Katas excercise for Roto
# To use, simply run in a console. You will be prompted with an easy menu.
#Purpose: an RPG item generator and battle simulator
# Battle Process:
#give each player random head, chest, feet armor and random weapon
#begin battle
#repeat below until a player's health < 0
#player with higher Agility attacks first
#check for attack hit
# - miss: pass
# - hit: check for counterattacks
# - no counter: hit lands (damage stat and chance)
# - counter: deflect up to 1/3 damage back
#wait to swing again until after (atkspeed) seconds
#player with lower agility attacks
#same as above
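# Illustrative single swing with made-up numbers (the real values are rolled in
# Item/Player below): a weapon with atkdamage=40 wielded by a player with STR 15
# hits for 40 + 15 = 55 before the armor roll, and with AGI 20 the swing repeats
# every weapon.atkspeed * (1 + 20/100.0) simulated seconds.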
class Item:
#usage: newItem = Item()
#usage for specific itemtype: newItem = Item(5)
#itemtypes listed below [0,5]
itemtypes = ["Head Armor", "Chest Armor", "Feet Armor", "Melee Weapon", "Ranged Weapon", "Magic Weapon"]
def __init__ (self, decltypeid=None):
#initialize item variables
if decltypeid is not None: #option to specify armor type
self.typeid = decltypeid
else:
self.typeid = random.randint(0,5)
self.level = random.randint(0,10)
self.type = Item.itemtypes[self.typeid]
self.itemclass = int(ceil((self.typeid+1)/3.0)) #1 = armor, 2 = weapon
#Weapons: all
if self.itemclass == 2:
self.atkspeed = random.uniform(1.5, 2.5)
self.atkchance = 0.9 + (self.level * 0.05)
self.atkdamage = random.randint(5,9) * self.level
self.dps = (self.atkspeed * self.atkdamage) * self.atkchance
#Weapon modifiers: Ranged
if self.typeid == 4:
self.atkspeed = self.atkspeed * 0.75
self.atkdamage = self.atkdamage * 0.5
self.atkchance = self.atkchance * 0.75
#Weapon modifiers: Magic
if self.typeid == 5:
self.atkspeed = self.atkspeed * 1.5
self.atkdamage = self.atkdamage * 2.0
self.atkchance = self.atkchance * 0.9
#Armor: percent dmg reduction (30%/45%/25% head/chest/feet)
elif self.typeid == 0: #head armor
self.dmgabsorb = 0.30 * self.level / 10.0 * random.uniform(0.8,1.0)
elif self.typeid == 1: #chest armor
self.dmgabsorb = 0.45 * self.level / 10.0 * random.uniform(0.8,1.0)
elif self.typeid ==2: #foot armor
self.dmgabsorb = 0.25 * self.level / 10.0 * random.uniform(0.8,1.0)
#stat boosts
self.stats = [0,0,0] #Strength, Agility, Health
self.allstats = 0
        for i in range(3):  # roll a possible boost for each stat: Strength, Agility, Health
statchance = self.level * 0.08
if random.uniform(0.0,1.0) <= statchance:
statboost = self.level/2 * random.uniform(1.0, 4.0)
self.stats[i] = self.stats[i] + statboost
self.allstats = self.allstats + statboost
#store
if self.itemclass == 1: #armor pricing (no dps)
self.buyprice = (((self.dmgabsorb * 100) * self.level) + (self.level * self.allstats)) * 100
elif self.itemclass == 2: #weapon pricing
self.buyprice = ((self.dps * self.level) + (self.level * self.allstats)) * 100
self.sellprice = self.buyprice * random.uniform(2.0,5.0) / 10.0
self.name = self.namegen()
def namegen(self):
#Generates a name for an item based on type and level
if self.typeid == 0: #Helm
root = random.choice(["Helm", "Headpiece", "Mask", "Helmet", "Hood", "Cowl"])
elif self.typeid == 1: #Chest
root = random.choice(["Armor", "Chestplate", "Cuirass"])
elif self.typeid == 2: #Feet
root = random.choice(["Greaves", "Boots", "Leggings", "Legs", "Shin Guards"])
elif self.typeid == 3: #Melee Weapon
root = random.choice(["Sword", "Scimitar", "Lance", "Greatsword", "Axe", "War Axe", "Dagger", "Mace", "Warhammer"])
elif self.typeid == 4: #Ranged Weapon
root = random.choice(["Sling", "Bow", "Longbow", "Handcannon"])
elif self.typeid == 5: #Magic Weapon
root = random.choice(["Flame Staff", "Water Staff", "Earth Staff", "Air Staff"])
#Prefix
if self.level == 10:
prefix = "Legendary"
elif self.level > 8:
if self.itemclass == 1: #Armor
prefix = "Epic"
else: #Weapon
prefix = "Brutal"
elif self.level > 6:
if self.itemclass == 1:
prefix = "Reinforced"
elif self.typeid == 5:
prefix = "Wicked" #staff
else:
prefix = "Tempered" #other weapons
elif self.level > 4:
if self.itemclass == 1:
prefix = "Rugged"
elif self.typeid == 5: #staff
prefix = "Twisted"
else:
prefix = "Worn"
elif self.level > 2:
if self.itemclass == 1:
prefix = "Tattered"
elif self.typeid == 5:
prefix = "Battered"
else:
prefix = "Dull"
else:
prefix = "Broken"
#Suffix
if self.allstats == 0:
suffix = ""
elif (self.stats[0] >= self.stats[1]) and (self.stats[0] >= self.stats[2]):
#Strength Dominant
suffix = " of Strength"
elif self.stats[1] >= self.stats[2]:
#Agility Dominant
suffix = " of Agility"
else:
#Health Dominant
suffix = " of Health"
return(prefix + " " + root + suffix)
class Player:
#generate player with random stats, armor, and weapon.
def __init__(self, name):
self.name = name
self.helmet = Item(0)
self.chest = Item(1)
self.feet = Item(2)
self.weapontype = random.randint(3,5)
self.weapon = Item(self.weapontype)
self.armorlevel = self.helmet.dmgabsorb + self.chest.dmgabsorb + self.feet.dmgabsorb
self.dps = self.weapon.dps
self.basestats = [random.randint(10,20),random.randint(10,20),random.randint(0,25)]
self.statups = [sum(x) for x in zip(self.helmet.stats, self.chest.stats, self.feet.stats, self.weapon.stats)]
self.stats = [sum(x) for x in zip(self.basestats, self.statups)]
self.health = self.stats[2] + 100
#adjusted atkspeed with agility multiplier
        self.atkspeed = self.weapon.atkspeed * (1 + (self.stats[1]/100.0))
#ajusted atkdamage with strength multiplier
self.atkdamage = self.weapon.atkdamage + self.stats[0]
self.atkchance = self.weapon.atkchance
def describe(self):
print "Player: %s Class: %s" % (self.name, self.weapon.type)
print " STR: %.1f AGI: %.1f HLT: %.1f" % (self.stats[0], self.stats[1], self.stats[2])
print " DMG: %.1f RATE: %.2f " % (self.atkdamage, self.atkspeed)
print " ARMOR: %.1f COUNTER: %.1f " % (self.armorlevel, self.stats[1]/100)
print "Equipped (TOTAL LVL %d): " % (self.weapon.level + self.helmet.level + self.chest.level + self.feet.level)
print " %s: LVL %d" % (self.weapon.name, self.weapon.level)
print " %s: LVL %d" % (self.helmet.name, self.helmet.level)
print " %s: LVL %d" % (self.chest.name, self.chest.level)
print " %s: LVL %d" % (self.feet.name, self.feet.level)
def attack(env, thisplayer, opponent):
#SimPy simulation for an attacking player
    #player with lower agility waits one swing period, so the higher-agility player attacks first
if thisplayer.stats[1] < opponent.stats[1]:
yield env.timeout(thisplayer.atkspeed)
while True:
#check if both players are alive
if opponent.health <= 0:
winner = thisplayer.name
loser = opponent.name
print("[%.2f]: %s has slain %s! The battle is over." % (env.now, winner, loser))
env.exit(value=thisplayer.name)
elif thisplayer.health <= 0:
winner = opponent.name
loser = thisplayer.name
env.exit(value=opponent.name)
#swing attempt
if random.random() <= thisplayer.atkchance:
            if random.random() <= opponent.stats[1]/200.0:
#opponent counterattacks up to 1/3 damage
                armordeflect = random.uniform(thisplayer.armorlevel/2.0, thisplayer.armorlevel)
                # attacker's own armor absorbs part of the deflected damage
                counterdamage = thisplayer.atkdamage * (1.0 - armordeflect) * random.uniform(0.0, 0.33)
print("[%.2f]: %s attacks, but %s counters with %s for %d damage" % (env.now, thisplayer.name, opponent.name, opponent.weapon.name, counterdamage))
thisplayer.health = thisplayer.health - counterdamage
else:
#hit
                # defender's armor absorbs part of the damage
                armordeflect = random.uniform(opponent.armorlevel/2.0, opponent.armorlevel)
                hitdamage = thisplayer.atkdamage * (1.0 - armordeflect)
print("[%.2f]: %s attacks %s with %s for %d damage" % (env.now, thisplayer.name, opponent.name, thisplayer.weapon.name, hitdamage))
opponent.health = opponent.health - hitdamage
else:
#miss
print("[%.2f]: %s misses %s" % (env.now, thisplayer.name, opponent.name))
yield env.timeout(thisplayer.atkspeed)
def runbattle():
print("= = = = =")
player1 = Player("Cain")
player2 = Player("Abel")
player1.describe()
print("= = = = =")
player2.describe()
env = simpy.rt.RealtimeEnvironment(initial_time=0, factor=1.0, strict=True)
env.process(attack(env, player1, player2))
env.process(attack(env, player2, player1))
print("= = = = =")
print("Running Simulation")
print("[time]: event")
env.run()
print("Simulation Complete")
print("= = = = =")
def main():
menu = {}
menu['1']="Generate random loot"
menu['2']="Generate specific type of loot"
menu['3']="Generate player with random loot"
menu['4']="Simulate battle between random players"
menu['5']="Exit"
typemenu = {}
typemenu['1']="Headpiece"
typemenu['2']="Chestpiece"
typemenu['3']="Footpiece"
typemenu['4']="Melee Weapon"
typemenu['5']="Ranged Weapon"
typemenu['6']="Magic Weapon"
while True:
print("= = = = = = = = = =")
options = menu.keys()
options.sort()
for entry in options:
print entry, menu[entry]
sel = raw_input("Enter # of sel: ")
if sel == '1':
newItem = Item()
print("= = = = =")
print newItem.name + " with attributes:"
print(vars(newItem))
elif sel == '2':
typeoptions = typemenu.keys()
typeoptions.sort()
for entry in typeoptions:
print " ", entry, typemenu[entry]
typesel = raw_input(" Enter # of sel: ")
newItem = Item(int(typesel) - 1)
print("= = = = =")
print newItem.name + " with attributes:"
print(vars(newItem))
elif sel == '3':
newName = raw_input( "Enter name for player: ")
newPlayer = Player(newName)
print("= = = = =")
newPlayer.describe()
elif sel == '4':
print("= = = = =")
runbattle()
elif sel == '5':
break
else:
print "Unknown Selection, try again."
if __name__ == "__main__":
main()
| mit | -8,551,126,958,085,172,000 | 41.154676 | 163 | 0.541385 | false | 3.492576 | false | false | false |
macosforge/ccs-calendarserver | txdav/common/datastore/podding/test/test_resource.py | 1 | 7770 | ##
# Copyright (c) 2005-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.clsprop import classproperty
import txweb2.dav.test.util
from txweb2 import http_headers, responsecode
from txweb2.dav.util import allDataFromStream
from txweb2.test.test_server import SimpleRequest
from twisted.internet.defer import inlineCallbacks, succeed
from txdav.caldav.datastore.scheduling.ischedule.localservers import (
ServersDB, Server
)
from txdav.common.datastore.podding.resource import ConduitResource
from txdav.common.datastore.test.util import populateCalendarsFrom, CommonCommonTests
import json
from txdav.common.datastore.podding.conduit import PoddingConduit
class ConduitPOST (CommonCommonTests, txweb2.dav.test.util.TestCase):
class FakeConduit(PoddingConduit):
def recv_fake(self, txn, j):
return succeed({
"back2u": j["echo"],
"more": "bits",
})
@inlineCallbacks
def setUp(self):
yield super(ConduitPOST, self).setUp()
serversDB = ServersDB()
self.thisServer = Server("A", "http://127.0.0.1", "A", True)
serversDB.addServer(self.thisServer)
yield self.buildStoreAndDirectory(serversDB=serversDB)
self.site.resource.putChild("conduit", ConduitResource(self.site.resource, self.storeUnderTest()))
yield self.populate()
@inlineCallbacks
def populate(self):
yield populateCalendarsFrom(self.requirements, self.storeUnderTest())
self.notifierFactory.reset()
@classproperty(cache=False)
def requirements(cls): # @NoSelf
return {
"user01": {
"calendar_1": {
},
"inbox": {
},
},
"user02": {
"calendar_1": {
},
"inbox": {
},
},
"user03": {
"calendar_1": {
},
"inbox": {
},
},
}
@inlineCallbacks
def test_receive_no_secret(self):
"""
Cross-pod request fails when there is no shared secret header present.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",)
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.FORBIDDEN)
@inlineCallbacks
def test_receive_wrong_mime(self):
"""
Cross-pod request fails when Content-Type header is wrong.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("text/plain",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_invalid_json(self):
"""
Cross-pod request fails when request data is not JSON.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""Hello, World!
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_bad_json(self):
"""
Cross-pod request fails when JSON data does not have an "action".
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"foo":"bar"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_ping(self):
"""
Cross-pod request works with the "ping" action.
"""
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"ping"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
@inlineCallbacks
def test_receive_fake_conduit_no_action(self):
"""
Cross-pod request fails when conduit does not support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"bogus",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.BAD_REQUEST)
@inlineCallbacks
def test_receive_fake_conduit(self):
"""
Cross-pod request works when conduit does support the action.
"""
store = self.storeUnderTest()
self.patch(store, "conduit", self.FakeConduit(store))
request = SimpleRequest(
self.site,
"POST",
"/conduit",
headers=http_headers.Headers(rawHeaders={
"Content-Type": ("application/json",),
self.thisServer.secretHeader()[0]: self.thisServer.secretHeader()[1],
}),
content="""
{
"action":"fake",
"echo":"bravo"
}
""".replace("\n", "\r\n")
)
response = (yield self.send(request))
self.assertEqual(response.code, responsecode.OK)
data = (yield allDataFromStream(response.stream))
j = json.loads(data)
self.assertTrue("result" in j)
self.assertEqual(j["result"], "ok")
self.assertTrue("value" in j)
self.assertEqual(j["value"], {"back2u": "bravo", "more": "bits"})
| apache-2.0 | 5,385,579,506,552,117,000 | 28.884615 | 106 | 0.566924 | false | 4.132979 | true | false | false |
cbode/ssr | ssr_algore.py | 1 | 8571 | #!/usr/bin/env python
############################################################################
#
# MODULE: ssr_algore.py
# AUTHOR: Collin Bode, UC Berkeley
#
# PURPOSE:
# Al Gore Rhythm combines r.sun model with Light Penetration Index (LPI).
# Merges all the r.sun solar radiation runs into a single estimate of
# Total Solar Radiation in watt-hours per meter squared per day.
# Optional clear sky vs real sky. <-- only clear sky for now.
#
# Modified: Collin Bode, October, 2012
# Migrated to unified parameter set.
# Simplified all the tweaks: JuneLPI kept, removed normalization for LPI
# R.sun calibration now separated from algorithm ("HalfDiff")
#
# COPYRIGHT: (c) 2011 Collin Bode
# (c) 2006 Hamish Bowman, and the GRASS Development Team
# (c) 2008 Glynn Clements, and the GRASS Development Team
# This program is free software under the GNU General Public
# License (>=v2). Read the file COPYING that comes with GRASS
# for details.
#
#############################################################################
# GLOBALS
global lf
global cores
global gisbase
global gisdbase
# MODULES
# GRASS & SSR environment setup for external use
from ssr_params import *
import os
import sys
gisdbase = os.path.abspath(gisdbase)
os.environ['GISBASE'] = gisbase
sys.path.append(os.path.join(os.environ['GISBASE'], "etc", "python"))
import grass.script as grass
import grass.script.setup as gsetup
# ssr_utilities must go after grass.script imports
from ssr_utilities import *
def main():
gsetup.init(gisbase, gisdbase, location, 'PERMANENT')
# Algorithms for combining Diffuse and Direct
# 'd' = old default value of 1,
# 'pl' = Power Law,Diffuse = 1.1224 * x^0.3157, R2 = 0.41. Direct = = 1.2567 * x, R2 = 0.78
# 'nl' = Natural Log,
# 'cl' = Cameau Linear, 'cn' = Cameau linear Normalized, nLPI = 1.428 * LPI, Diffuse = 0.94 * nLPI
# 'gn' = Gendron linear normalized, nLPI = 1.428 * LPI, Diffuse = 0.01719 + 1.024 * nLPI
# 'gl' = Gendron linear. no normalization. It overestimates field radiation.
# Input bare-earth r.sun diffuse is too high. Instead of changing Linke Turbidity, modified here.
# See weatherstations.xlsx for analysis.
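    # Illustrative 'pl' (power law) combination for a single cell, with assumed
    # values (the real computation is done per-raster by r.mapcalc below):
    #   lpi, beam, diff = 0.4, 5000.0, 1200.0
    #   subdiff = 1.1224 * (lpi ** 0.3157) * diff   # ~1009 Wh/m^2/day
    #   subbeam = 1.2567 * beam * lpi               # ~2513 Wh/m^2/day
    #   subcanopy = subbeam + subdiff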
# Open log file
tlog = dt.datetime.strftime(dt.datetime.now(),"%Y-%m-%d_h%Hm%M")
lf = open(gisdbase+os.sep+'ssr_'+tlog+'_algore.log', 'a')
# Overwrite files?
ow = int(algore_run -1)
# Print parameters
printout('---------------------------------------',lf)
printout('-- ALGORITHM FOR CLEAR SKY RADIATION --',lf)
printout(' LPI year: '+year,lf)
printout(' LPI pref: '+lpipref,lf)
printout(' region: '+bregion,lf)
printout(' sun mapset: '+msun,lf)
printout(' SSR output mapset: '+mssr,lf)
printout(' max veg height: '+maxheight,lf)
printout(' Algorithm code: '+algore,lf)
printout('keep intermediates: '+str(keeptemp),lf)
printout(' overwrite files: '+str(ow),lf)
printout('---------------------------------------',lf)
# Run Algorithm
r1start = dt.datetime.now()
printout("Starting Al Gore Rhythm at "+str(r1start),lf)
# Goto Correct Mapset and make sure Region is correctly set (ssr_utilities)
mapset_gotocreate(mssr,bregion,C,lf)
# For each week
for doyn in range(5,366,7):
doy = str(doyn).zfill(3)
month = dt.datetime.strftime(dt.datetime(2011,1,1) + dt.timedelta(doyn -1),"%m")
printout("Processing Day of Year " + doy + " in month "+month,lf)
# Input Raster Layers
sundem = bregion + C + 'mdem'
suncan = bregion + C + 'mcan'
dembeam = sundem + doy + 'beam@'+msun
demdiff = sundem + doy + 'diff@'+msun
canbeam = suncan + doy + 'beam@'+msun
candiff = suncan + doy + 'diff@'+msun
canglob = suncan + doy + 'glob'
veg = vegheight+'@PERMANENT'
lpi = lpipref + 'm'+ month + '@' + mlpi # lpi_c30y14s17m01
if(lpivsjune == True):
lpi = lpipref + '06@' + mlpi
# Output Raster Layers
lpipart = C + 'm' + year + 's' + boxsize + 'm' + algore
if(lpivsjune == True):
lpipart = C + 'm' + year + 's' + boxsize+'mjune' + algore
ssr = 'ssr_'+ lpipart + doy
opencanopy = 'opencanopy_' + lpipart + doy
subcanopy = 'subcanopy_' + lpipart + doy
lpibeam = 'subbeam_' + lpipart + doy
lpidiff = 'subdiff_' + lpipart + doy
###################################################################
#1. SUBCANOPY Merge LPI and Bare-earth by Algorithm
printout("DOY "+doy+" 1. merging lpi and dem using: "+algore,lf)
if(algore == 'cl'): # 'cl' Cameau Linear regression
grass.mapcalc("$tmp_lpidiff = 0.94 * $lpi * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'cn'): # 'cn' Cameau Normalized - assumes halfdiff is set to True
grass.mapcalc("$tmp_lpidiff = 0.94 * (1.428 * $lpi) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = 1.428 * $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'gn'): #gn Diffuse Gendron Linear Normalized. y = 0.01719 + 1.024 * nLPI
grass.mapcalc("$tmp_lpidiff = 0.01719 + 1.024 * (1.428 * $lpi) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = (1.428 * $lpi) * $beam", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
elif(algore == 'gl'): #gl Diffuse Gendron Linear NON-normalized y = 0.01719 + 1.024 * LPI
grass.mapcalc("$tmp_lpidiff = 0.01719 + 1.024 * $lpi * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = $lpi * $beam", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
else: # 'pl' power law
grass.mapcalc("$tmp_lpidiff = 1.1224 * ($lpi^0.3157) * $diff", tmp_lpidiff = lpidiff, diff = demdiff, lpi = lpi,overwrite = ow)
grass.mapcalc("$tmp_lpibeam = 1.2567 * $beam * $lpi", tmp_lpibeam = lpibeam, beam = dembeam, lpi = lpi,overwrite = ow)
grass.mapcalc("$subcanopy = $tmp_lpibeam + $tmp_lpidiff", subcanopy = subcanopy, tmp_lpidiff = lpidiff, tmp_lpibeam = lpibeam, overwrite = ow)
###################################################################
#2. OPEN CANOPY: Remove areas under tall trees (maxheight meters or higher)
printout('DOY '+doy+' 2. set subcanopy values to -88',lf)
grass.mapcalc("$canglob = $canbeam + $candiff",canglob = canglob, canbeam = canbeam, candiff = candiff,overwrite = ow)
grass.mapcalc("$opencanopy = if($veg < $maxheight, $canglob,-88)",opencanopy = opencanopy, veg = veg, canglob = canglob, maxheight = maxheight,overwrite = ow)
###################################################################
#3. Merge lpi*bare-earth with cleaned canopy, keeping whichever is higher.
printout("DOY "+doy+" 3. Merge lpi*dem with canopy shade = "+ssr,lf)
grass.mapcalc("$ssr = if($opencanopy > $subcanopy, $opencanopy, $subcanopy)", opencanopy = opencanopy, subcanopy = subcanopy,ssr = ssr,overwrite = ow)
grass.run_command("r.colors",map = ssr, color = "bcyr")
#4. Remove temp maps
if(keeptemp == False):
for raster in [lpibeam,lpidiff,opencanopy,subcanopy,canglob]:
grass.run_command("g.remove",rast=raster)
# Reset GRASS env values
grass.run_command("g.mapset", mapset="PERMANENT")
grass.run_command("g.region", flags = "d")
r1end = dt.datetime.now()
printout("Al can shake his booty, 'cause...",lf)
printout("DONE! with Al Gore Rhythm at "+str(r1end),lf)
printout("--------------------------------------",lf)
lf.close()
sys.exit("FINISHED.")
if __name__ == "__main__":
main()
"""
try:
#options, flags = grass.parser()
main()
except:
printout('ERROR! quitting.')
print traceback.print_exc()
traceback.print_exc(file=lf)
traceback.print_exc(file=sys.stdout)
finally:
lf.close()
sys.exit("FINISHED.")
"""
| gpl-2.0 | -8,142,489,074,652,205,000 | 46.882682 | 166 | 0.575779 | false | 3.088649 | false | false | false |
joelfrederico/Blowout | blowout/support.py | 1 | 4201 | import h5py as _h5
import numpy as _np
import logging as _logging
import time as _time
_logger = _logging.getLogger(__name__)
import ipdb as pdb
import re as _re
def _timestamp2filename(cls, ftype, filename=None):
# ======================================
# Get filename from timestamp
# ======================================
if filename is not None:
filename = '{}.{}.h5'.format(filename, ftype)
else:
try:
timestamp = cls.timestamp
except RuntimeError as err:
_logger.debug('Handled exception: {}'.format(err))
timestamp = _time.localtime()
filename = _time.strftime('%Y.%m.%d.%H%M.%S.{}.h5'.format(ftype), timestamp)
return filename
class Timestamp(object):
def __init__(self):
self._timestamp = None
def _set_timestamp(self, timestamp):
self._timestamp = timestamp
@property
def timestamp(self):
if self._timestamp is not None:
return self._timestamp
else:
raise RuntimeError('No timestamp: simulation not completed.')
def _write_arrays(group, name, data, parent=None):
grefs = group.create_group('_refs_{}'.format(name))
ref_dtype = _h5.special_dtype(ref=_h5.Reference)
dname = group.create_dataset(name, (_np.size(data),), dtype=ref_dtype)
# ======================================
# Create datasets
# ======================================
for i, array in enumerate(data):
if array.dtype == _np.dtype(object):
# ======================================
# If dataset can't be created, nest
# ======================================
darray = _write_arrays(grefs, '{}'.format(i), array, parent=name)
else:
darray = grefs.create_dataset(name='{}'.format(i), data=array, shape=_np.shape(array), compression="gzip")
# ======================================
# Store reference in dataset
# ======================================
dname[i] = darray.ref
# if parent == 'hist':
# pdb.set_trace()
# ======================================
# Return created dataset
# ======================================
return dname
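# Illustrative round-trip for the reference-based layout above (a sketch; file
# and dataset names are made up):
#   with _h5.File('out.h5', 'w') as f:
#       ragged = _np.array([_np.arange(3), _np.arange(5)], dtype=object)
#       _write_arrays(f, 'ragged', ragged)
#       back = _read_arrays(f, 'ragged')  # -> array holding 2 numpy arrays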
def _read_arrays(group, name):
refs = group[name]
arrays = _np.empty(shape=refs.size, dtype=object)
for i, ref in enumerate(refs):
arrays[i] = group.file[ref].value
return arrays
def _write_scalars(group, name, data):
return group.create_dataset(name=name, data=data, shape=_np.shape(data), compression="gzip")
def _write_data(group, name, data):
if data.dtype == _np.dtype(object):
_write_arrays(group, name, data)
else:
_write_scalars(group, name, data)
def _read_dict(group, name):
ret_group = group[name]
names = ret_group.keys()
valid_names = list()
underscore = _re.compile('_')
dict_layout = {'names': [], 'formats': []}
for nm in names:
if not underscore.match(nm):
valid_names.append(nm)
dict_layout['names'].append(nm)
if type(ret_group[nm].value[0]) == _h5.h5r.Reference:
dict_layout['formats'].append(object)
else:
                raise NotImplementedError("Haven't done this...")
results_flat = _np.zeros(len(ret_group[valid_names[0]]), dtype=dict_layout)
for nm in valid_names:
# if nm == 'hist':
# pdb.set_trace()
values = ret_group[nm]
for i, value in enumerate(values):
try:
array = group.file[value].value
if array.size > 0:
if type(array[0]) == _h5.h5r.Reference:
out = _np.empty(len(array), dtype=object)
for j, val in enumerate(array):
out[j] = group.file[val].value
else:
out = group.file[value].value
else:
out = _np.array([])
results_flat[nm][i] = out
except ValueError:
_logger.debug('There was a ValueError')
# pdb.set_trace()
return results_flat
| mit | 3,573,787,336,411,098,000 | 30.586466 | 118 | 0.502261 | false | 4.110568 | false | false | false |
Saevon/spacebattle | menu/controllers.py | 1 | 4003 | from abstract.event_manager import EventManager, Mods
from pygame import locals as const
from ship import Ship
# Missing Mouse button constants
const.MOUSEKEY_LEFT = 1
const.MOUSEKEY_MIDDLE = 2
const.MOUSEKEY_RIGHT = 3
const.MOUSEKEY_SCROLLUP = 4
const.MOUSEKEY_SCROLLDOWN = 5
# Start up our pause_handler
pause_handler = EventManager()
# Start up our game_handler
game_handler = EventManager()
@pause_handler.quit
@pause_handler.shortcut(Mods.META, const.K_q)
@pause_handler.shortcut(Mods.ALT, const.K_q)
@game_handler.quit
@game_handler.shortcut(Mods.META, const.K_q)
@game_handler.shortcut(Mods.ALT, const.K_q)
def quit(context):
raise context.mediator.PopEvent()
@game_handler.keydown(const.K_SPACE)
def pause(context):
context.mediator.pause()
@pause_handler.keydown(const.K_SPACE)
def unpause(context):
raise context.mediator.ResumeEvent()
#################################################
# Ship Controls
# Player 1
@game_handler.keydown(const.K_d, const={'player': 1})
@game_handler.keydown(const.K_l, const={'player': 2})
@game_handler.keydown(const.K_RIGHT, const={'player': 3})
@game_handler.keydown(const.K_h, const={'player': 4})
def rotate_right(context, player):
context.mediator.players[player].rotate(Ship.ROT_RIGHT)
@game_handler.keyup(const.K_d, const={'player': 1})
@game_handler.keyup(const.K_l, const={'player': 2})
@game_handler.keyup(const.K_RIGHT, const={'player': 3})
@game_handler.keyup(const.K_h, const={'player': 4})
def rotate_right_stop(context, player):
context.mediator.players[player].rotate(Ship.ROT_RIGHT, stop=True)
@game_handler.keydown(const.K_a, const={'player': 1})
@game_handler.keydown(const.K_j, const={'player': 2})
@game_handler.keydown(const.K_LEFT, const={'player': 3})
@game_handler.keydown(const.K_f, const={'player': 4})
def rotate_left(context, player):
context.mediator.players[player].rotate(Ship.ROT_LEFT)
@game_handler.keyup(const.K_a, const={'player': 1})
@game_handler.keyup(const.K_j, const={'player': 2})
@game_handler.keyup(const.K_LEFT, const={'player': 3})
@game_handler.keyup(const.K_f, const={'player': 4})
def rotate_left_stop(context, player):
context.mediator.players[player].rotate(Ship.ROT_LEFT, stop=True)
@game_handler.keydown(const.K_w, const={'player': 1})
@game_handler.keydown(const.K_i, const={'player': 2})
@game_handler.keydown(const.K_UP, const={'player': 3})
@game_handler.keydown(const.K_t, const={'player': 4})
def move_up(context, player):
context.mediator.players[player].move(Ship.MOV_FORWARDS)
@game_handler.keyup(const.K_w, const={'player': 1})
@game_handler.keyup(const.K_i, const={'player': 2})
@game_handler.keyup(const.K_UP, const={'player': 3})
@game_handler.keyup(const.K_t, const={'player': 4})
def move_up_stop(context, player):
context.mediator.players[player].move(Ship.MOV_FORWARDS, stop=True)
@game_handler.keydown(const.K_s, const={'player': 1})
@game_handler.keydown(const.K_k, const={'player': 2})
@game_handler.keydown(const.K_DOWN, const={'player': 3})
@game_handler.keydown(const.K_g, const={'player': 4})
def move_down(context, player):
context.mediator.players[player].move(Ship.MOV_BACKWARDS)
@game_handler.keyup(const.K_s, const={'player': 1})
@game_handler.keyup(const.K_k, const={'player': 2})
@game_handler.keyup(const.K_DOWN, const={'player': 3})
@game_handler.keyup(const.K_g, const={'player': 4})
def move_down_stop(context, player):
context.mediator.players[player].move(Ship.MOV_BACKWARDS, stop=True)
#################################################
# Debug only code
DEBUG = False
if DEBUG:
def locate_key(val):
for key, value in const.__dict__.iteritems():
if value == val and (key.startswith('K_') or key.startswith('KMOD')):
print key
@game_handler.event(const.KEYDOWN)
def debug_keys(context):
print '-' * 15
print '-- Key'
locate_key(context.event.key)
print '-- Mod'
locate_key(context.event.mod)
print '-' * 15
| mit | 7,717,589,673,143,250,000 | 32.638655 | 81 | 0.681989 | false | 2.945548 | false | false | false |
suutari-ai/shoop | shuup/campaigns/models/campaigns.py | 2 | 18214 | # This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import random
import string
from django.conf import settings
from django.core.exceptions import ValidationError
from django.db import models
from django.db.models import Q
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from enumfields import Enum
from parler.models import TranslatableModel, TranslatedFields
from shuup.campaigns.consts import (
CAMPAIGNS_CACHE_NAMESPACE, CATALOG_FILTER_CACHE_NAMESPACE,
CONTEXT_CONDITION_CACHE_NAMESPACE
)
from shuup.campaigns.models.basket_conditions import (
CategoryProductsBasketCondition, ProductsInBasketCondition
)
from shuup.campaigns.utils.campaigns import get_product_ids_and_quantities
from shuup.campaigns.utils.matcher import get_matching_for_product
from shuup.core import cache
from shuup.core.fields import InternalIdentifierField
from shuup.core.models import Category, Order, Shop
from shuup.core.utils import context_cache
from shuup.utils.analog import define_log_model
from shuup.utils.properties import MoneyPropped
class CampaignType(Enum):
CATALOG = 1
BASKET = 2
class CampaignQueryset(models.QuerySet):
def available(self, shop=None):
query = Q(
Q(active=True) &
(Q(start_datetime__isnull=True) | Q(start_datetime__lte=now())) &
(Q(end_datetime__isnull=True) | Q(end_datetime__gte=now()))
)
if shop:
query &= Q(shop=shop)
return self.filter(query)
class Campaign(MoneyPropped, TranslatableModel):
admin_url_suffix = None
shop = models.ForeignKey(Shop, verbose_name=_("shop"), help_text=_("The shop where the campaign is active."))
name = models.CharField(max_length=120, verbose_name=_("name"), help_text=_("The name for this campaign."))
# translations in subclass
identifier = InternalIdentifierField(unique=True)
active = models.BooleanField(default=False, verbose_name=_("active"), help_text=_(
"Check this if the campaign is currently active. Please also set a start and end date."
))
start_datetime = models.DateTimeField(null=True, blank=True, verbose_name=_("start date and time"), help_text=_(
"The date and time the campaign starts. This is only applicable if the campaign is marked as active."
))
end_datetime = models.DateTimeField(null=True, blank=True, verbose_name=_("end date and time"), help_text=_(
"The date and time the campaign ends. This is only applicable if the campaign is marked as active."
))
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
objects = CampaignQueryset.as_manager()
class Meta:
abstract = True
verbose_name = _('Campaign')
verbose_name_plural = _('Campaigns')
def save(self, *args, **kwargs):
super(Campaign, self).save(*args, **kwargs)
cache.bump_version(CAMPAIGNS_CACHE_NAMESPACE)
cache.bump_version(CONTEXT_CONDITION_CACHE_NAMESPACE)
cache.bump_version(CATALOG_FILTER_CACHE_NAMESPACE)
def is_available(self):
if not self.active: # move to manager?
return False
if self.start_datetime and self.end_datetime:
if self.start_datetime <= now() <= self.end_datetime:
return True
return False
elif self.start_datetime and not self.end_datetime:
if self.start_datetime > now():
return False
elif not self.start_datetime and self.end_datetime:
if self.end_datetime < now():
return False
return True
@property
def type(self):
return CampaignType.BASKET if isinstance(self, BasketCampaign) else CampaignType.CATALOG
class CatalogCampaign(Campaign):
_queryset = None
admin_url_suffix = "catalog_campaign"
conditions = models.ManyToManyField('ContextCondition', blank=True, related_name='campaign')
filters = models.ManyToManyField('CatalogFilter', blank=True, related_name='campaign')
translations = TranslatedFields(public_name=models.CharField(max_length=120, blank=True, help_text=_(
"The campaign name to show in the store front."
)))
def __str__(self):
return force_text(_("Catalog Campaign: %(name)s" % dict(name=self.name)))
def save(self, *args, **kwargs):
super(CatalogCampaign, self).save(*args, **kwargs)
self.filters.update(active=self.active)
for f in self.filters.all():
for matching_product in f.get_matching_shop_products():
context_cache.bump_cache_for_shop_product(matching_product)
self.conditions.update(active=self.active)
def rules_match(self, context, shop_product, matching_catalog_filters, matching_context_conditions):
if not self.is_available():
return False
# If rule has filters, all of them has to match
for filter_pk in self.filters.values_list("pk", flat=True):
if filter_pk not in matching_catalog_filters:
return False
# All filters match so let's check that also all the conditions match
for condition_pk in self.conditions.values_list("pk", flat=True):
if condition_pk not in matching_context_conditions:
return False
return True
@classmethod
def get_for_product(cls, shop_product):
matching_filters = get_matching_for_product(shop_product, provide_category="campaign_catalog_filter")
matching_conditions = get_matching_for_product(shop_product, provide_category="campaign_context_condition")
query_filter = Q(Q(filters__in=matching_filters) | Q(conditions__in=matching_conditions))
return cls.objects.available(shop=shop_product.shop).filter(query_filter).distinct()
@classmethod
def get_matching(cls, context, shop_product):
prod_ctx_cache_elements = dict(
customer=context.customer.pk or 0,
shop=context.shop.pk,
product_id=shop_product.pk)
namespace = CAMPAIGNS_CACHE_NAMESPACE
key = "%s:%s" % (namespace, hash(frozenset(prod_ctx_cache_elements.items())))
cached_matching = cache.get(key, None)
if cached_matching is not None:
return cached_matching
from shuup.campaigns.models.matching import get_matching_context_conditions, get_matching_catalog_filters
matching_context_conditions = get_matching_context_conditions(context)
matching_catalog_filters = get_matching_catalog_filters(shop_product)
if not (matching_context_conditions or matching_catalog_filters):
return []
# Get all possible campaign id's for matching context_conditions
campaigns_based_on_conditions = set(
cls.objects.filter(
active=True,
shop=context.shop,
conditions__id__in=matching_context_conditions
).values_list("pk", flat=True)
)
campaigns_based_on_catalog_filters = set()
if hasattr(cls, "filters"):
# Get all possible campaigns for matching catalog_filters
campaigns_based_on_catalog_filters = set(
cls.objects.filter(
active=True,
shop=context.shop,
filters__id__in=matching_catalog_filters
).values_list("pk", flat=True)
)
all_possible_campaigns_ids = (campaigns_based_on_conditions | campaigns_based_on_catalog_filters)
matching = []
for campaign in cls.objects.filter(id__in=all_possible_campaigns_ids):
if campaign.rules_match(context, shop_product, matching_catalog_filters, matching_context_conditions):
matching.append(campaign)
cache.set(key, matching, timeout=None)
return matching
class BasketCampaign(Campaign):
admin_url_suffix = "basket_campaign"
basket_line_text = models.CharField(
max_length=120, verbose_name=_("basket line text"), help_text=_("This text will be shown in basket."))
conditions = models.ManyToManyField('BasketCondition', blank=True, related_name='campaign')
coupon = models.OneToOneField('Coupon', null=True, blank=True, related_name='campaign', verbose_name=_("coupon"))
translations = TranslatedFields(
public_name=models.CharField(max_length=120, verbose_name=_("public name"), help_text=_(
"The campaign name to show in the store front."
))
)
def __str__(self):
return force_text(_("Basket Campaign: %(name)s" % dict(name=self.name)))
def save(self, *args, **kwargs):
if self.coupon:
code_count_for_shop = BasketCampaign.objects.filter(
active=True, shop_id=self.shop.id, coupon__code=self.coupon.code)
if not self.id and code_count_for_shop.exists():
raise ValidationError(_("Can not have multiple active campaigns with same code."))
if self.id and code_count_for_shop.exclude(coupon_id=self.coupon.id).exists():
raise ValidationError(_("Can not have multiple active campaigns with same code."))
super(BasketCampaign, self).save(*args, **kwargs)
self.conditions.update(active=self.active)
@classmethod
def get_for_product(cls, shop_product):
matching_conditions = get_matching_for_product(
shop_product, provide_category="campaign_basket_condition")
matching_effects = get_matching_for_product(
shop_product, provide_category="campaign_basket_discount_effect_form")
matching_line_effects = get_matching_for_product(
shop_product, provide_category="campaign_basket_line_effect_form")
effects_q = Q(Q(line_effects__id__in=matching_line_effects) | Q(discount_effects__id__in=matching_effects))
matching_q = Q(Q(conditions__in=matching_conditions) | effects_q)
return cls.objects.available(shop=shop_product.shop).filter(matching_q).distinct()
@classmethod
def get_matching(cls, basket, lines):
matching = []
exclude_condition_ids = set()
product_id_to_qty = get_product_ids_and_quantities(basket)
# Get ProductsInBasketCondition's that can't match with the basket
products_in_basket_conditions_to_check = set(
ProductsInBasketCondition.objects.filter(
products__id__in=product_id_to_qty.keys()
).values_list("id", flat=True)
)
exclude_condition_ids |= set(
ProductsInBasketCondition.objects.exclude(
id__in=products_in_basket_conditions_to_check
).values_list("id", flat=True)
)
# Get CategoryProductsBasketCondition's that can't match with the basket
categories = set(Category.objects.filter(
shop_products__product_id__in=product_id_to_qty.keys()).values_list("id", flat=True))
category_products_in_basket_to_check = set(
CategoryProductsBasketCondition.objects.filter(categories__in=categories).values_list("id", flat=True)
)
exclude_condition_ids |= set(
CategoryProductsBasketCondition.objects.exclude(
id__in=category_products_in_basket_to_check
).values_list("id", flat=True)
)
queryset = cls.objects.filter(active=True, shop=basket.shop)
if exclude_condition_ids:
queryset = queryset.exclude(conditions__id__in=exclude_condition_ids)
for campaign in queryset.prefetch_related("conditions"):
if campaign.rules_match(basket, lines):
matching.append(campaign)
return matching
def rules_match(self, basket, lines):
"""
Check if basket rules match.
They will not match if
1) The campaign is not active
        2) The campaign has an attached coupon
which doesn't match or is not active
3) Any of the attached rules doesn't match
"""
if not self.is_available():
return False
if self.coupon and not (self.coupon.active and self.coupon.code.upper() in [c.upper() for c in basket.codes]):
return False
for rule in self.conditions.all():
if not rule.matches(basket, lines):
return False
return True
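    # Illustrative flow (a sketch; the basket and lines objects are assumptions
    # normally supplied by the order source when a coupon code is applied):
    #   matching = BasketCampaign.get_matching(basket, lines)
    #   for campaign in matching:
    #       # apply campaign.discount_effects / campaign.line_effects
    #       pass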
class CouponUsage(models.Model):
coupon = models.ForeignKey('Coupon', related_name='usages')
order = models.ForeignKey(Order, related_name='coupon_usages')
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
@classmethod
def add_usage(cls, order, coupon):
return cls.objects.create(order=order, coupon=coupon)
@python_2_unicode_compatible
class Coupon(models.Model):
admin_url_suffix = "coupon"
name_field = "code" # TODO: Document me
search_fields = ["code"] # used by Select2Multiple to know which fields use to search by
code = models.CharField(max_length=12)
usage_limit_customer = models.PositiveIntegerField(
blank=True, null=True,
verbose_name=_("usage limit per customer"), help_text=_("Limit the amount of usages per a single customer."))
usage_limit = models.PositiveIntegerField(
blank=True, null=True,
verbose_name=_("usage limit"),
        help_text=_("Set the absolute limit of usages for this coupon. "
                    "If the limit is zero (0) the coupon cannot be used."))
active = models.BooleanField(default=False, verbose_name=_("is active"))
created_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("created by"))
modified_by = models.ForeignKey(
settings.AUTH_USER_MODEL, blank=True, null=True,
related_name="+", on_delete=models.SET_NULL,
verbose_name=_("modified by"))
created_on = models.DateTimeField(auto_now_add=True, editable=False, verbose_name=_("created on"))
modified_on = models.DateTimeField(auto_now=True, editable=False, verbose_name=_("modified on"))
def save(self, **kwargs):
campaign = BasketCampaign.objects.filter(active=True, coupon_id=self.id).first()
if campaign and BasketCampaign.objects.filter(
active=True, shop_id=campaign.shop.id, coupon__code=self.code).exclude(id=campaign.id).exists():
            raise ValidationError(_("Cannot have multiple active campaigns with the same code."))
return super(Coupon, self).save(**kwargs)
@classmethod
def generate_code(cls, length=6):
if length > 12:
length = 12
return ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(length))
@property
def exhausted(self):
val = bool(self.usage_limit and self.usages.count() >= self.usage_limit)
return val
@property
def attached(self):
return BasketCampaign.objects.filter(coupon=self).exists()
def attach_to_campaign(self, campaign):
if not self.attached:
self.campaign = campaign
@classmethod
def is_usable(cls, code, customer):
try:
code = cls.objects.get(code__iexact=code, active=True)
return code.can_use_code(customer)
except cls.DoesNotExist:
return False
def can_use_code(self, customer):
"""
Check if customer can use the code
:param customer:
:type customer: `Contact` or None
:rtype: True|False
"""
if not self.active:
return False
if not self.attached:
return False
if self.usage_limit_customer:
if not customer or customer.is_anonymous:
return False
if (self.usages.filter(order__customer=customer, coupon=self).count() >= self.usage_limit_customer):
return False
return not self.exhausted
def use(self, order):
return CouponUsage.add_usage(order=order, coupon=self)
def increase_customer_usage_limit_by(self, amount):
if self.usage_limit_customer:
new_limit = self.usage_limit_customer + amount
else:
new_limit = self.usages.count() + amount
self.usage_limit_customer = new_limit
def increase_usage_limit_by(self, amount):
self.usage_limit = self.usage_limit + amount if self.usage_limit else (self.usages.count() + amount)
    def has_been_used(self, usage_count=1):
        """Check whether the coupon has been used at least the given number of times."""
return CouponUsage.objects.filter(coupon=self).count() >= usage_count
def __str__(self):
return self.code
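    # Example (sketch) of the intended coupon life cycle; `campaign`, `order`
    # and `customer` are assumed to already exist:
    #
    #     coupon = Coupon.objects.create(code=Coupon.generate_code(), active=True)
    #     campaign.coupon = coupon      # a coupon only validates once attached
    #     campaign.save()
    #     if Coupon.is_usable(coupon.code, customer):
    #         coupon.use(order)         # records a CouponUsage row
    #     coupon.increase_usage_limit_by(10)
    #     coupon.save()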
CatalogCampaignLogEntry = define_log_model(CatalogCampaign)
BasketCampaignLogEntry = define_log_model(BasketCampaign)
CouponLogEntry = define_log_model(Coupon)
CouponUsageLogEntry = define_log_model(CouponUsage)
| agpl-3.0 | 2,415,510,597,969,143,000 | 40.022523 | 118 | 0.65521 | false | 3.994298 | false | false | false |
pade/sprinkler | src/channel.py | 1 | 3188 | # -*- coding: UTF-8 -*-
'''
Created on 29 August 2016
@author: dassierp
'''
import logging
from progdays import Progdays
class Channel():
'''
Control a water channel
'''
def __init__(self, pName, pChNumber, pHwInterface):
'''
Constructor
@param pName: channel name
@param pChNumber: channel number, from 0 to number
of physical channel - 1
@param pHwInterface: a class derived from BaseGpio
'''
self.__nb = pChNumber
self.__hw = pHwInterface
self.__logger = logging.getLogger('sprinkler')
self.__is_enable = False
self.__manual = "AUTO"
self.__name = pName
self.__progdays = [Progdays(), Progdays()]
# On initialisation, stop water
self.__running = False
        self.__logger.debug(
            "Initialising channel {} ({})".format(self.__name, self.__nb))
def _get_nb(self):
return self.__nb
def _set_enable(self, pEnable):
'''
@param pEnable: True to enable the channel (can be used)
'''
self.__is_enable = pEnable
if pEnable:
self.__logger.info(
"Channel {} ({}) is enabled".format(self.__name, self.__nb))
else:
self.__logger.info(
"Channel {} ({}) is disabled".format(self.__name, self.__nb))
def _get_enable(self):
return self.__is_enable
def _get_name(self):
return self.__name
def _set_name(self, pName):
self.__name = pName
def _get_running(self):
return self.__running
def _set_running(self, pState):
'''
@param pState: boolean, if pState is True, then channel runs,
otherwise channel is not running
        If channel is not enabled, do nothing
'''
if self.isenable is True:
if pState is True:
self.__running = True
self.__logger.debug(
"Channel {} ({}) ON".format(self.name, self.nb))
else:
self.__running = False
self.__logger.debug(
"Channel {} ({}) OFF".format(self.name, self.nb))
self.__hw.write(self.__nb, self.__running)
def _get_prog(self):
return self.__progdays
def _set_prog(self, progs):
'''
Set a new program
@param progs: Table of Progdays class
'''
self.__progdays = progs
    def _set_manual(self, action):
        """ Manual command, supersedes program
@param action: must be: "OFF", "ON", or "AUTO"
"""
if action == "ON":
self.__manual = "ON"
elif action == "OFF":
self.__manual = "OFF"
else:
self.__manual = "AUTO"
def _get_manual(self):
return self.__manual
nb = property(_get_nb, None, None, None)
running = property(_get_running, _set_running, None, None)
isenable = property(_get_enable, _set_enable, None, None)
name = property(_get_name, _set_name, None, None)
progs = property(_get_prog, _set_prog, None, None)
manual = property(_get_manual, _set_manual, None, None)
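# Example usage (sketch). `FakeGpio` stands in for any concrete BaseGpio
# implementation; only a write(channel, state) method is assumed here:
#
#     class FakeGpio(object):
#         def write(self, nb, state):
#             print("channel %d -> %s" % (nb, state))
#
#     ch = Channel("Front lawn", 0, FakeGpio())
#     ch.isenable = True       # the channel must be enabled before it can run
#     ch.running = True        # forwards True to the GPIO interface
#     ch.manual = "OFF"        # manual mode supersedes the programmed schedule
#     ch.running = False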
| gpl-3.0 | -1,962,895,549,548,024,300 | 27.711712 | 77 | 0.531534 | false | 3.929716 | false | false | false |
Letractively/rdflib | rdflib/plugins/parsers/notation3.py | 1 | 78910 | #!/usr/bin/env python
u"""
notation3.py - Standalone Notation3 Parser
Derived from CWM, the Closed World Machine
Authors of the original suite:
* Dan Connolly <@@>
* Tim Berners-Lee <@@>
* Yosi Scharf <@@>
* Joseph M. Reagle Jr. <[email protected]>
* Rich Salz <[email protected]>
http://www.w3.org/2000/10/swap/notation3.py
Copyright 2000-2007, World Wide Web Consortium.
Copyright 2001, MIT.
Copyright 2001, Zolera Systems Inc.
License: W3C Software License
http://www.w3.org/Consortium/Legal/copyright-software
Modified by Sean B. Palmer
Copyright 2007, Sean B. Palmer. \u32E1
Modified to work with rdflib by Gunnar Aastrand Grimnes
Copyright 2010, Gunnar A. Grimnes
"""
# Python standard libraries
import types
import sys
import os
import re
import StringIO
import codecs
from binascii import a2b_hex
from decimal import Decimal
from rdflib.term import URIRef, BNode, Literal, Variable, _XSD_PFX, _unique_id
from rdflib.graph import QuotedGraph, ConjunctiveGraph
from rdflib import py3compat
b = py3compat.b
__all__ = ['URISyntaxError', 'BadSyntax', 'N3Parser', "verbosity", "setVerbosity", "progress", "splitFrag", "splitFragP", "join", "refTo", "base", "canonical", "runNamespace", "uniqueURI", "Canonicalize", "stripCR", "dummyWrite", "toBool", "stringToN3", "backslashUify", "hexify", "dummy"]
from rdflib.parser import Parser
# Incestuous.. would be nice to separate N3 and XML
# from sax2rdf import XMLtoDOM
def XMLtoDOM(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
# SWAP http://www.w3.org/2000/10/swap
# from diag import verbosity, setVerbosity, progress
def verbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def setVerbosity(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def progress(*args, **kargs):
# print >> sys.stderr, args, kargs
pass
def splitFrag(uriref):
"""split a URI reference between the fragment and the rest.
Punctuation is thrown away.
e.g.
>>> splitFrag("abc#def")
('abc', 'def')
>>> splitFrag("abcdef")
('abcdef', None)
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i+1:]
else:
return uriref, None
def splitFragP(uriref, punct=0):
"""split a URI reference before the fragment
Punctuation is kept.
e.g.
>>> splitFragP("abc#def")
('abc', '#def')
>>> splitFragP("abcdef")
('abcdef', '')
"""
i = uriref.rfind("#")
if i >= 0:
return uriref[:i], uriref[i:]
else:
return uriref, ''
@py3compat.format_doctest_out
def join(here, there):
"""join an absolute URI and URI reference
(non-ascii characters are supported/doctested;
haven't checked the details of the IRI spec though)
here is assumed to be absolute.
there is URI reference.
>>> join('http://example/x/y/z', '../abc')
'http://example/x/abc'
Raise ValueError if there uses relative path
syntax but here has no hierarchical path.
>>> join('mid:foo@example', '../foo')
Traceback (most recent call last):
raise ValueError, here
ValueError: Base <mid:foo@example> has no slash after colon - with relative '../foo'.
>>> join('http://example/x/y/z', '')
'http://example/x/y/z'
>>> join('mid:foo@example', '#foo')
'mid:foo@example#foo'
We grok IRIs
>>> len(u'Andr\\xe9')
5
>>> join('http://example.org/', u'#Andr\\xe9')
%(u)s'http://example.org/#Andr\\xe9'
"""
assert(here.find("#") < 0), "Base may not contain hash: '%s'"% here # caller must splitFrag (why?)
slashl = there.find('/')
colonl = there.find(':')
# join(base, 'foo:/') -- absolute
if colonl >= 0 and (slashl < 0 or colonl < slashl):
return there
bcolonl = here.find(':')
assert(bcolonl >= 0), "Base uri '%s' is not absolute" % here # else it's not absolute
path, frag = splitFragP(there)
if not path:
return here + frag
# join('mid:foo@example', '../foo') bzzt
if here[bcolonl+1:bcolonl+2] != '/':
raise ValueError ("Base <%s> has no slash after colon - with relative '%s'." %(here, there))
if here[bcolonl+1:bcolonl+3] == '//':
bpath = here.find('/', bcolonl+3)
else:
bpath = bcolonl+1
# join('http://xyz', 'foo')
if bpath < 0:
bpath = len(here)
here = here + '/'
# join('http://xyz/', '//abc') => 'http://abc'
if there[:2] == '//':
return here[:bcolonl+1] + there
# join('http://xyz/', '/abc') => 'http://xyz/abc'
if there[:1] == '/':
return here[:bpath] + there
slashr = here.rfind('/')
while 1:
if path[:2] == './':
path = path[2:]
if path == '.':
path = ''
elif path[:3] == '../' or path == '..':
path = path[3:]
i = here.rfind('/', bpath, slashr)
if i >= 0:
here = here[:i+1]
slashr = i
else:
break
return here[:slashr+1] + path + frag
commonHost = re.compile(r'^[-_a-zA-Z0-9.]+:(//[^/]*)?/[^/]*$')
def refTo(base, uri):
"""figure out a relative URI reference from base to uri
>>> refTo('http://example/x/y/z', 'http://example/x/abc')
'../abc'
>>> refTo('file:/ex/x/y', 'file:/ex/x/q/r#s')
'q/r#s'
>>> refTo(None, 'http://ex/x/y')
'http://ex/x/y'
>>> refTo('http://ex/x/y', 'http://ex/x/y')
''
Note the relationship between refTo and join:
join(x, refTo(x, y)) == y
which points out certain strings which cannot be URIs. e.g.
>>> x='http://ex/x/y';y='http://ex/x/q:r';join(x, refTo(x, y)) == y
0
So 'http://ex/x/q:r' is not a URI. Use 'http://ex/x/q%3ar' instead:
>>> x='http://ex/x/y';y='http://ex/x/q%3ar';join(x, refTo(x, y)) == y
1
    This one checks that it uses a root-relative one where that is
all they share. Now uses root-relative where no path is shared.
This is a matter of taste but tends to give more resilience IMHO
-- and shorter paths
Note that base may be None, meaning no base. In some situations, there
    just ain't a base. Such is life. In these cases, refTo returns the absolute value.
    The axiom join(b, refTo(b, x)) == x still holds.
This saves people having to set the base to "bogus:".
>>> refTo('http://ex/x/y/z', 'http://ex/r')
'/r'
"""
# assert base # don't mask bugs -danc # not a bug. -tim
if not base:
return uri
if base == uri:
return ""
# Find how many path segments in common
i = 0
while i < len(uri) and i<len(base):
if uri[i] == base[i]:
i = i + 1
else:
break
# print "# relative", base, uri, " same up to ", i
    # i points to end of shortest one or first difference
m = commonHost.match(base[:i])
if m:
k = uri.find("//")
if k < 0:
k = -2 # no host
l = uri.find("/", k+2)
if uri[l+1:l+2] != "/" and base[l+1:l+2] != "/" and uri[:l] == base[:l]:
return uri[l:]
if uri[i:i+1] == "#" and len(base) == i:
return uri[i:] # fragment of base
while i > 0 and uri[i-1] != '/' :
i = i-1 # scan for slash
if i < 3:
return uri # No way.
if base.find("//", i-2) > 0 or uri.find("//", i-2) > 0:
return uri # An unshared "//"
if base.find(":", i) > 0:
return uri # An unshared ":"
n = base.count("/", i)
if n == 0 and i < len(uri) and uri[i] == '#':
return "./" + uri[i:]
elif n == 0 and i == len(uri):
return "./"
else:
return ("../" * n) + uri[i:]
def base():
"""The base URI for this process - the Web equiv of cwd
    Relative or absolute unix-standard filenames parsed relative to
    this yield the URI of the file.
If we had a reliable way of getting a computer name,
we should put it in the hostname just to prevent ambiguity
"""
# return "file://" + hostname + os.getcwd() + "/"
return "file://" + _fixslash(os.getcwd()) + "/"
def _fixslash(argstr):
""" Fix windowslike filename to unixlike - (#ifdef WINDOWS)"""
s = argstr
for i in range(len(s)):
if s[i] == "\\":
s = s[:i] + "/" + s[i+1:]
if s[0] != "/" and s[1] == ":":
s = s[2:] # @@@ Hack when drive letter present
return s
URI_unreserved = b("ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789-._~")
# unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
@py3compat.format_doctest_out
def canonical(str_in):
"""Convert equivalent URIs (or parts) to the same string
    There are many different levels of URI canonicalization
which are possible. See http://www.ietf.org/rfc/rfc3986.txt
Done:
    - Converting unicode IRI to utf-8
- Escaping all non-ASCII
- De-escaping, if escaped, ALPHA (%%41-%%5A and %%61-%%7A), DIGIT (%%30-%%39),
hyphen (%%2D), period (%%2E), underscore (%%5F), or tilde (%%7E) (Sect 2.4)
- Making all escapes uppercase hexadecimal
Not done:
- Making URI scheme lowercase
- changing /./ or /foo/../ to / with care not to change host part
>>> canonical("foo bar")
%(b)s'foo%%20bar'
>>> canonical(u'http:')
%(b)s'http:'
>>> canonical('fran%%c3%%83%%c2%%a7ois')
%(b)s'fran%%C3%%83%%C2%%A7ois'
>>> canonical('a')
%(b)s'a'
>>> canonical('%%4e')
%(b)s'N'
>>> canonical('%%9d')
%(b)s'%%9D'
>>> canonical('%%2f')
%(b)s'%%2F'
>>> canonical('%%2F')
%(b)s'%%2F'
"""
if type(str_in) == type(u''):
s8 = str_in.encode('utf-8')
else:
s8 = str_in
s = b('')
i = 0
while i < len(s8):
if py3compat.PY3:
n = s8[i]
ch = bytes([n])
else:
ch = s8[i]
n = ord(ch)
if (n > 126) or (n < 33) : # %-encode controls, SP, DEL, and utf-8
s += b("%%%02X" % ord(ch))
elif ch == b('%') and i+2 < len(s8):
ch2 = a2b_hex(s8[i+1:i+3])
if ch2 in URI_unreserved:
s += ch2
else:
s += b("%%%02X" % ord(ch2))
i = i + 3
continue
else:
s += ch
i = i + 1
return s
CONTEXT = 0
PRED = 1
SUBJ = 2
OBJ = 3
PARTS = PRED, SUBJ, OBJ
ALL4 = CONTEXT, PRED, SUBJ, OBJ
SYMBOL = 0
FORMULA = 1
LITERAL = 2
LITERAL_DT = 21
LITERAL_LANG = 22
ANONYMOUS = 3
XMLLITERAL = 25
Logic_NS = "http://www.w3.org/2000/10/swap/log#"
NODE_MERGE_URI = Logic_NS + "is" # Pseudo-property indicating node merging
forSomeSym = Logic_NS + "forSome"
forAllSym = Logic_NS + "forAll"
RDF_type_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#type"
RDF_NS_URI = "http://www.w3.org/1999/02/22-rdf-syntax-ns#"
OWL_NS = "http://www.w3.org/2002/07/owl#"
DAML_sameAs_URI = OWL_NS+"sameAs"
parsesTo_URI = Logic_NS + "parsesTo"
RDF_spec = "http://www.w3.org/TR/REC-rdf-syntax/"
List_NS = RDF_NS_URI # From 20030808
_Old_Logic_NS = "http://www.w3.org/2000/10/swap/log.n3#"
N3_first = (SYMBOL, List_NS + "first")
N3_rest = (SYMBOL, List_NS + "rest")
N3_li = (SYMBOL, List_NS + "li")
N3_nil = (SYMBOL, List_NS + "nil")
N3_List = (SYMBOL, List_NS + "List")
N3_Empty = (SYMBOL, List_NS + "Empty")
runNamespaceValue = None
def runNamespace():
"Return a URI suitable as a namespace for run-local objects"
# @@@ include hostname (privacy?) (hash it?)
global runNamespaceValue
if runNamespaceValue == None:
runNamespaceValue = join(base(), _unique_id()) + '#'
return runNamespaceValue
nextu = 0
def uniqueURI():
"A unique URI"
global nextu
nextu += 1
return runNamespace() + "u_" + `nextu`
class URISyntaxError(ValueError):
"""A parameter is passed to a routine that requires a URI reference"""
pass
tracking = False
chatty_flag = 50
from xml.dom import Node
try:
from xml.ns import XMLNS
except:
class XMLNS:
BASE = "http://www.w3.org/2000/xmlns/"
XML = "http://www.w3.org/XML/1998/namespace"
_attrs = lambda E: (E.attributes and E.attributes.values()) or []
_children = lambda E: E.childNodes or []
_IN_XML_NS = lambda n: n.namespaceURI == XMLNS.XML
_inclusive = lambda n: n.unsuppressedPrefixes == None
# Does a document/PI has lesser/greater document order than the
# first element?
_LesserElement, _Element, _GreaterElement = range(3)
def _sorter(n1, n2):
'''_sorter(n1, n2) -> int
Sorting predicate for non-NS attributes.'''
i = cmp(n1.namespaceURI, n2.namespaceURI)
if i:
return i
return cmp(n1.localName, n2.localName)
def _sorter_ns(n1, n2):
'''_sorter_ns((n,v),(n,v)) -> int
"(an empty namespace URI is lexicographically least)."'''
if n1[0] == 'xmlns':
return -1
if n2[0] == 'xmlns':
return 1
return cmp(n1[0], n2[0])
def _utilized(n, node, other_attrs, unsuppressedPrefixes):
'''_utilized(n, node, other_attrs, unsuppressedPrefixes) -> boolean
Return true if that nodespace is utilized within the node'''
if n.startswith('xmlns:'):
n = n[6:]
elif n.startswith('xmlns'):
n = n[5:]
if (n=="" and node.prefix in ["#default", None]) or \
n == node.prefix or n in unsuppressedPrefixes:
return 1
for attr in other_attrs:
if n == attr.prefix:
return 1
return 0
#_in_subset = lambda subset, node: not subset or node in subset
_in_subset = lambda subset, node: subset is None or node in subset # rich's tweak
class _implementation:
    '''Implementation class for C14N. This accompanies a node during its
processing and includes the parameters and processing state.'''
# Handler for each node type; populated during module instantiation.
handlers = {}
def __init__(self, node, write, **kw):
'''Create and run the implementation.'''
self.write = write
self.subset = kw.get('subset')
self.comments = kw.get('comments', 0)
self.unsuppressedPrefixes = kw.get('unsuppressedPrefixes')
nsdict = kw.get('nsdict', { 'xml': XMLNS.XML, 'xmlns': XMLNS.BASE })
# Processing state.
self.state = (nsdict, {'xml':''}, {}) #0422
if node.nodeType == Node.DOCUMENT_NODE:
self._do_document(node)
elif node.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
if not _inclusive(self):
self._do_element(node)
else:
inherited = self._inherit_context(node)
self._do_element(node, inherited)
elif node.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
elif node.nodeType == Node.TEXT_NODE:
self._do_text(node)
else:
raise TypeError, str(node)
def _inherit_context(self, node):
'''_inherit_context(self, node) -> list
Scan ancestors of attribute and namespace context. Used only
for single element node canonicalization, not for subset
canonicalization.'''
# Collect the initial list of xml:foo attributes.
xmlattrs = filter(_IN_XML_NS, _attrs(node))
# Walk up and get all xml:XXX attributes we inherit.
inherited, parent = [], node.parentNode
while parent and parent.nodeType == Node.ELEMENT_NODE:
for a in filter(_IN_XML_NS, _attrs(parent)):
n = a.localName
if n not in xmlattrs:
xmlattrs.append(n)
inherited.append(a)
parent = parent.parentNode
return inherited
def _do_document(self, node):
'''_do_document(self, node) -> None
Process a document node. documentOrder holds whether the document
element has been encountered such that PIs/comments can be written
as specified.'''
self.documentOrder = _LesserElement
for child in node.childNodes:
if child.nodeType == Node.ELEMENT_NODE:
self.documentOrder = _Element # At document element
self._do_element(child)
self.documentOrder = _GreaterElement # After document element
elif child.nodeType == Node.PROCESSING_INSTRUCTION_NODE:
self._do_pi(child)
elif child.nodeType == Node.COMMENT_NODE:
self._do_comment(child)
elif child.nodeType == Node.DOCUMENT_TYPE_NODE:
pass
else:
raise TypeError, str(child)
handlers[Node.DOCUMENT_NODE] = _do_document
def _do_text(self, node):
'''_do_text(self, node) -> None
Process a text or CDATA node. Render various special characters
as their C14N entity representations.'''
if not _in_subset(self.subset, node):
return
        s = node.data.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace(">", "&gt;")
        s = s.replace("\015", "&#xD;")
if s:
self.write(s)
handlers[Node.TEXT_NODE] = _do_text
handlers[Node.CDATA_SECTION_NODE] = _do_text
def _do_pi(self, node):
'''_do_pi(self, node) -> None
Process a PI node. Render a leading or trailing #xA if the
document order of the PI is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node):
return
W = self.write
if self.documentOrder == _GreaterElement:
W('\n')
W('<?')
W(node.nodeName)
s = node.data
if s:
W(' ')
W(s)
W('?>')
if self.documentOrder == _LesserElement:
W('\n')
handlers[Node.PROCESSING_INSTRUCTION_NODE] = _do_pi
def _do_comment(self, node):
'''_do_comment(self, node) -> None
Process a comment node. Render a leading or trailing #xA if the
document order of the comment is greater or lesser (respectively)
than the document element.
'''
if not _in_subset(self.subset, node):
return
if self.comments:
W = self.write
if self.documentOrder == _GreaterElement:
W('\n')
W('<!--')
W(node.data)
W('-->')
if self.documentOrder == _LesserElement:
W('\n')
handlers[Node.COMMENT_NODE] = _do_comment
def _do_attr(self, n, value):
''''_do_attr(self, node) -> None
Process an attribute.'''
W = self.write
W(' ')
W(n)
W('="')
        s = value.replace("&", "&amp;")
        s = s.replace("<", "&lt;")
        s = s.replace('"', '&quot;')
        s = s.replace('\011', '&#x9;')
        s = s.replace('\012', '&#xA;')
        s = s.replace('\015', '&#xD;')
W(s)
W('"')
def _do_element(self, node, initial_other_attrs = []):
'''_do_element(self, node, initial_other_attrs = []) -> None
Process an element (and its children).'''
# Get state (from the stack) make local copies.
# ns_parent -- NS declarations in parent
# ns_rendered -- NS nodes rendered by ancestors
# ns_local -- NS declarations relevant to this element
# xml_attrs -- Attributes in XML namespace from parent
# xml_attrs_local -- Local attributes in XML namespace.
ns_parent, ns_rendered, xml_attrs = \
self.state[0], self.state[1].copy(), self.state[2].copy() #0422
ns_local = ns_parent.copy()
xml_attrs_local = {}
# progress("_do_element node.nodeName=", node.nodeName)
# progress("_do_element node.namespaceURI", node.namespaceURI)
# progress("_do_element node.tocml()", node.toxml())
# Divide attributes into NS, XML, and others.
other_attrs = initial_other_attrs[:]
in_subset = _in_subset(self.subset, node)
for a in _attrs(node):
# progress("\t_do_element a.nodeName=", a.nodeName)
if a.namespaceURI == XMLNS.BASE:
n = a.nodeName
if n == "xmlns:":
n = "xmlns" # DOM bug workaround
ns_local[n] = a.nodeValue
elif a.namespaceURI == XMLNS.XML:
if _inclusive(self) or in_subset:
xml_attrs_local[a.nodeName] = a #0426
else:
other_attrs.append(a)
#add local xml:foo attributes to ancestor's xml:foo attributes
xml_attrs.update(xml_attrs_local)
# Render the node
W, name = self.write, None
if in_subset:
name = node.nodeName
W('<')
W(name)
# Create list of NS attributes to render.
ns_to_render = []
for n, v in ns_local.items():
# If default namespace is XMLNS.BASE or empty,
# and if an ancestor was the same
if n == "xmlns" and v in [ XMLNS.BASE, '' ] \
and ns_rendered.get('xmlns') in [ XMLNS.BASE, '', None ]:
continue
# "omit namespace node with local name xml, which defines
# the xml prefix, if its string value is
# http://www.w3.org/XML/1998/namespace."
if n in ["xmlns:xml", "xml"] \
and v in [ 'http://www.w3.org/XML/1998/namespace' ]:
continue
# If not previously rendered
# and it's inclusive or utilized
if (n, v) not in ns_rendered.items() \
and (_inclusive(self) or \
_utilized(n, node, other_attrs, self.unsuppressedPrefixes)):
ns_to_render.append((n, v))
# Sort and render the ns, marking what was rendered.
ns_to_render.sort(_sorter_ns)
for n, v in ns_to_render:
self._do_attr(n, v)
ns_rendered[n] = v #0417
# If exclusive or the parent is in the subset, add the local xml attributes
# Else, add all local and ancestor xml attributes
# Sort and render the attributes.
if not _inclusive(self) or _in_subset(self.subset, node.parentNode): #0426
other_attrs.extend(xml_attrs_local.values())
else:
other_attrs.extend(xml_attrs.values())
other_attrs.sort(_sorter)
for a in other_attrs:
self._do_attr(a.nodeName, a.value)
W('>')
# Push state, recurse, pop state.
state, self.state = self.state, (ns_local, ns_rendered, xml_attrs)
for c in _children(node):
_implementation.handlers[c.nodeType](self, c)
self.state = state
if name:
W('</%s>' % name)
handlers[Node.ELEMENT_NODE] = _do_element
def Canonicalize(node, output=None, **kw):
'''Canonicalize(node, output=None, **kw) -> UTF-8
    Canonicalize a DOM document/element node and all descendants.
Return the text; if output is specified then output.write will
be called to output the text and None will be returned
Keyword parameters:
nsdict -- a dictionary of prefix:uri namespace entries
assumed to exist in the surrounding context
comments -- keep comments if non-zero (default is 0)
subset -- Canonical XML subsetting resulting from XPath (default is [])
unsuppressedPrefixes -- do exclusive C14N, and this specifies the
prefixes that should be inherited.
'''
if output:
apply(_implementation, (node, output.write), kw)
else:
s = StringIO.StringIO()
apply(_implementation, (node, s.write), kw)
return s.getvalue()
# end of xmlC14n.py
# from why import BecauseOfData, becauseSubexpression
def BecauseOfData(*args, **kargs):
# print args, kargs
pass
def becauseSubexpression(*args, **kargs):
# print args, kargs
pass
N3_forSome_URI = forSomeSym
N3_forAll_URI = forAllSym
# Magic resources we know about
ADDED_HASH = "#" # Stop where we use this in case we want to remove it!
# This is the hash on namespace URIs
RDF_type = ( SYMBOL , RDF_type_URI )
DAML_sameAs = ( SYMBOL, DAML_sameAs_URI )
LOG_implies_URI = "http://www.w3.org/2000/10/swap/log#implies"
BOOLEAN_DATATYPE = _XSD_PFX + "boolean"
DECIMAL_DATATYPE = _XSD_PFX + "decimal"
DOUBLE_DATATYPE = _XSD_PFX + "double"
FLOAT_DATATYPE = _XSD_PFX + "float"
INTEGER_DATATYPE = _XSD_PFX + "integer"
option_noregen = 0 # If set, do not regenerate genids on output
# @@ I18n - the notname chars need extending for well known unicode non-text
# characters. The XML spec switched to assuming unknown things were name
# characters.
# _namechars = string.lowercase + string.uppercase + string.digits + '_-'
_notQNameChars = "\t\r\n !\"#$%&'()*.,+/;<=>?@[\\]^`{|}~" # else valid qname :-/
_notNameChars = _notQNameChars + ":" # Assume anything else valid name :-/
_rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
N3CommentCharacter = "#" # For unix script #! compatibility
########################################## Parse string to sink
#
# Regular expressions:
eol = re.compile(r'[ \t]*(#[^\n]*)?\r?\n') # end of line, poss. w/comment
eof = re.compile(r'[ \t]*(#[^\n]*)?$') # end of file, poss. w/comment
ws = re.compile(r'[ \t]*') # Whitespace not including NL
signed_integer = re.compile(r'[-+]?[0-9]+') # integer
number_syntax = re.compile(r'(?P<integer>[-+]?[0-9]+)(?P<decimal>\.[0-9]+)?(?P<exponent>(?:e|E)[-+]?[0-9]+)?')
digitstring = re.compile(r'[0-9]+') # Unsigned integer
interesting = re.compile(r'[\\\r\n\"]')
langcode = re.compile(r'[a-zA-Z0-9]+(-[a-zA-Z0-9]+)?')
#"
class SinkParser:
def __init__(self, store, openFormula=None, thisDoc="", baseURI=None,
genPrefix = "", flags="",
why=None):
""" note: namespace names should *not* end in #;
the # will get added during qname processing """
self._bindings = {}
self._flags = flags
if thisDoc != "":
assert ':' in thisDoc, "Document URI not absolute: <%s>" % thisDoc
self._bindings[""] = thisDoc + "#" # default
self._store = store
if genPrefix:
store.setGenPrefix(genPrefix) # pass it on
self._thisDoc = thisDoc
self.lines = 0 # for error handling
self.startOfLine = 0 # For calculating character number
self._genPrefix = genPrefix
self.keywords = ['a', 'this', 'bind', 'has', 'is', 'of', 'true', 'false' ]
        self.keywordsSet = 0 # Only then can others be considered qnames
self._anonymousNodes = {} # Dict of anon nodes already declared ln: Term
self._variables = {}
self._parentVariables = {}
self._reason = why # Why the parser was asked to parse this
self._reason2 = None # Why these triples
# was: diag.tracking
if tracking:
self._reason2 = BecauseOfData(
store.newSymbol(thisDoc), because=self._reason)
if baseURI:
self._baseURI = baseURI
else:
if thisDoc:
self._baseURI = thisDoc
else:
self._baseURI = None
assert not self._baseURI or ':' in self._baseURI
if not self._genPrefix:
if self._thisDoc:
self._genPrefix = self._thisDoc + "#_g"
else:
self._genPrefix = uniqueURI()
if openFormula == None:
if self._thisDoc:
self._formula = store.newFormula(thisDoc + "#_formula")
else:
self._formula = store.newFormula()
else:
self._formula = openFormula
self._context = self._formula
self._parentContext = None
def here(self, i):
"""String generated from position in file
        This is for repeatability when referring people to bnodes in a document.
This has diagnostic uses less formally, as it should point one to which
bnode the arbitrary identifier actually is. It gives the
        line and character number of the '[' character or path character
which introduced the blank node. The first blank node is boringly _L1C1.
It used to be used only for tracking, but for tests in general
it makes the canonical ordering of bnodes repeatable."""
return "%s_L%iC%i" % (self._genPrefix , self.lines,
i - self.startOfLine + 1)
def formula(self):
return self._formula
def loadStream(self, stream):
return self.loadBuf(stream.read()) # Not ideal
def loadBuf(self, buf):
"""Parses a buffer and returns its top level formula"""
self.startDoc()
self.feed(buf)
return self.endDoc() # self._formula
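    # Example (sketch) of driving the parser at this level; in rdflib the same
    # flow is normally wrapped by a Parser plugin, and the names below are
    # purely illustrative:
    #
    #     graph = ConjunctiveGraph()
    #     p = SinkParser(RDFSink(graph), baseURI="http://example.org/doc")
    #     p.loadBuf('@prefix : <http://example.org/ns#> .  :a :b :c .')
    #     # graph now holds the triple (:a, :b, :c); a BadSyntax exception
    #     # reports the line and character where parsing stopped.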
    def feed(self, octets):
        """Feed an octet stream to the parser
if BadSyntax is raised, the string
passed in the exception object is the
remainder after any statements have been parsed.
So if there is more data to feed to the
parser, it should be straightforward to recover."""
if not isinstance(octets, unicode):
s = octets.decode('utf-8')
# NB already decoded, so \ufeff
if len(s) > 0 and s[0] == codecs.BOM_UTF8.decode('utf-8'):
s = s[1:]
else:
s = octets
i = 0
while i >= 0:
j = self.skipSpace(s, i)
if j < 0:
return
i = self.directiveOrStatement(s, j)
if i < 0:
print("# next char: %s" % s[j])
raise BadSyntax(self._thisDoc, self.lines, s, j,
"expected directive or statement")
def directiveOrStatement(self, argstr, h):
i = self.skipSpace(argstr, h)
if i < 0:
return i # EOF
j = self.directive(argstr, i)
if j >= 0:
return self.checkDot(argstr, j)
j = self.statement(argstr, i)
if j >= 0:
return self.checkDot(argstr, j)
return j
#@@I18N
global _notNameChars
#_namechars = string.lowercase + string.uppercase + string.digits + '_-'
def tok(self, tok, argstr, i):
"""Check for keyword. Space must have been stripped on entry and
we must not be at end of file."""
assert tok[0] not in _notNameChars # not for punctuation
if argstr[i:i+1] == "@":
i = i+1
else:
if tok not in self.keywords:
return -1 # No, this has neither keywords declaration nor "@"
if (argstr[i:i+len(tok)] == tok
and (argstr[i+len(tok)] in _notQNameChars )):
i = i + len(tok)
return i
else:
return -1
def directive(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
res = []
j = self.tok('bind', argstr, i) # implied "#". Obsolete.
if j > 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"keyword bind is obsolete: use @prefix")
j = self.tok('keywords', argstr, i)
if j > 0:
i = self.commaSeparatedList(argstr, j, res, self.bareWord)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"'@keywords' needs comma separated list of words")
self.setKeywords(res[:])
# was: diag.chatty_flag
if chatty_flag > 80:
progress("Keywords ", self.keywords)
return i
j = self.tok('forAll', argstr, i)
if j > 0:
i = self.commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad variable list after @forAll")
for x in res:
#self._context.declareUniversal(x)
if x not in self._variables or x in self._parentVariables:
self._variables[x] = self._context.newUniversal(x)
return i
j = self.tok('forSome', argstr, i)
if j > 0:
i = self. commaSeparatedList(argstr, j, res, self.uri_ref2)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad variable list after @forSome")
for x in res:
self._context.declareExistential(x)
return i
j = self.tok('prefix', argstr, i) # no implied "#"
if j >= 0:
t = []
i = self.qname(argstr, j, t)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected qname after @prefix")
j = self.uri_ref2(argstr, i, t)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected <uriref> after @prefix _qname_")
ns = self.uriOf(t[1])
if self._baseURI:
ns = join(self._baseURI, ns)
elif ":" not in ns:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"With no base URI, cannot use relative URI in @prefix <"+ns+">")
assert ':' in ns # must be absolute
self._bindings[t[0][0]] = ns
self.bind(t[0][0], hexify(ns))
return j
j = self.tok('base', argstr, i) # Added 2007/7/7
if j >= 0:
t = []
i = self.uri_ref2(argstr, j, t)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected <uri> after @base ")
ns = self.uriOf(t[0])
if self._baseURI:
ns = join(self._baseURI, ns)
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"With no previous base URI, cannot use relative URI in @base <"+ns+">")
assert ':' in ns # must be absolute
self._baseURI = ns
return i
return -1 # Not a directive, could be something else.
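    # Sketch of the directives the branches above accept, written as N3:
    #
    #     @keywords a, is, of .
    #     @prefix ex: <http://example.org/ns#> .
    #     @base <http://example.org/> .
    #     @forAll ex:x .
    #     @forSome ex:y .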
def bind(self, qn, uri):
assert isinstance(uri,
types.StringType), "Any unicode must be %x-encoded already"
if qn == "":
self._store.setDefaultNamespace(uri)
else:
self._store.bind(qn, uri)
def setKeywords(self, k):
"Takes a list of strings"
if k == None:
self.keywordsSet = 0
else:
self.keywords = k
self.keywordsSet = 1
def startDoc(self):
# was: self._store.startDoc()
self._store.startDoc(self._formula)
def endDoc(self):
"""Signal end of document and stop parsing. returns formula"""
self._store.endDoc(self._formula) # don't canonicalize yet
return self._formula
def makeStatement(self, quadruple):
#$$$$$$$$$$$$$$$$$$$$$
# print "# Parser output: ", `quadruple`
self._store.makeStatement(quadruple, why=self._reason2)
def statement(self, argstr, i):
r = []
i = self.object(argstr, i, r) # Allow literal for subject - extends RDF
if i < 0:
return i
j = self.property_list(argstr, i, r[0])
if j < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected propertylist")
return j
def subject(self, argstr, i, res):
return self.item(argstr, i, res)
def verb(self, argstr, i, res):
""" has _prop_
is _prop_ of
a
=
_prop_
>- prop ->
<- prop -<
_operator_"""
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
r = []
j = self.tok('has', argstr, i)
if j >= 0:
i = self.prop(argstr, j, r)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, j, "expected property after 'has'")
res.append(('->', r[0]))
return i
j = self.tok('is', argstr, i)
if j >= 0:
i = self.prop(argstr, j, r)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"expected <property> after 'is'")
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"End of file found, expected property after 'is'")
return j # eof
i = j
j = self.tok('of', argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected 'of' after 'is' <prop>")
res.append(('<-', r[0]))
return j
j = self.tok('a', argstr, i)
if j >= 0:
res.append(('->', RDF_type))
return j
if argstr[i:i+2] == "<=":
res.append(('<-', self._store.newSymbol(Logic_NS+"implies")))
return i+2
if argstr[i:i+1] == "=":
if argstr[i+1:i+2] == ">":
res.append(('->', self._store.newSymbol(Logic_NS+"implies")))
return i+2
res.append(('->', DAML_sameAs))
return i+1
if argstr[i:i+2] == ":=":
# patch file relates two formulae, uses this @@ really?
res.append(('->', Logic_NS+"becomes"))
return i+2
j = self.prop(argstr, i, r)
if j >= 0:
res.append(('->', r[0]))
return j
if argstr[i:i+2] == ">-" or argstr[i:i+2] == "<-":
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
">- ... -> syntax is obsolete.")
return -1
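    # Rough mapping of the verb forms listed in the docstring above, in N3:
    #
    #     :x has :p :y .    # same as  :x :p :y .
    #     :y is :p of :x .  # inverse direction, still yields  :x :p :y .
    #     :x a :C .         # rdf:type
    #     :x = :y .         # owl:sameAs
    #     :x => :y .        # log:implies
    #     :y <= :x .        # same triple as  :x => :y .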
def prop(self, argstr, i, res):
return self.item(argstr, i, res)
def item(self, argstr, i, res):
return self.path(argstr, i, res)
def blankNode(self, uri=None):
if "B" not in self._flags:
return self._context.newBlankNode(uri, why=self._reason2)
x = self._context.newSymbol(uri)
self._context.declareExistential(x)
return x
def path(self, argstr, i, res):
"""Parse the path production.
"""
j = self.nodeOrLiteral(argstr, i, res)
if j < 0:
return j # nope
while argstr[j:j+1] in "!^.": # no spaces, must follow exactly (?)
ch = argstr[j:j+1] # @@ Allow "." followed IMMEDIATELY by a node.
if ch == ".":
ahead = argstr[j+1:j+2]
if not ahead or (ahead in _notNameChars
and ahead not in ":?<[{("): break
subj = res.pop()
obj = self.blankNode(uri=self.here(j))
j = self.node(argstr, j+1, res)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found in middle of path syntax")
pred = res.pop()
if ch == "^": # Reverse traverse
self.makeStatement((self._context, pred, obj, subj))
else:
self.makeStatement((self._context, pred, subj, obj))
res.append(obj)
return j
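    # Path syntax sketch handled above: "!" walks forward along a property and
    # "^" walks backward; each step mints a fresh blank node, e.g.
    #
    #     :joe!fam:mother    introduces _:m and asserts  :joe fam:mother _:m .
    #     :joe^fam:son       introduces _:p and asserts  _:p fam:son :joe .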
def anonymousNode(self, ln):
"""Remember or generate a term for one of these _: anonymous nodes"""
term = self._anonymousNodes.get(ln, None)
if term != None:
return term
term = self._store.newBlankNode(self._context, why=self._reason2)
self._anonymousNodes[ln] = term
return term
def node(self, argstr, i, res, subjectAlready=None):
"""Parse the <node> production.
Space is now skipped once at the beginning
        instead of in multiple calls to self.skipSpace().
"""
subj = subjectAlready
j = self.skipSpace(argstr, i)
if j < 0:
return j #eof
i = j
ch = argstr[i:i+1] # Quick 1-character checks first:
if ch == "[":
bnodeID = self.here(i)
j = self.skipSpace(argstr, i+1)
if j < 0:
raise BadSyntax(self._thisDoc,
self.lines, argstr, i, "EOF after '['")
if argstr[j:j+1] == "=": # Hack for "is" binding name to anon node
i = j + 1
objs = []
j = self.objectList(argstr, i, objs)
if j >= 0:
subj = objs[0]
if len(objs) > 1:
for obj in objs:
self.makeStatement((self._context,
DAML_sameAs, subj, obj))
j = self.skipSpace(argstr, j)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF when objectList expected after [ = ")
if argstr[j:j+1] == ";":
j = j+1
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"objectList expected after [= ")
if subj is None:
subj = self.blankNode(uri= bnodeID)
i = self.property_list(argstr, j, subj)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"property_list expected")
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF when ']' expected after [ <propertyList>")
if argstr[j:j+1] != "]":
raise BadSyntax(self._thisDoc,
self.lines, argstr, j, "']' expected")
res.append(subj)
return j+1
if ch == "{":
ch2 = argstr[i+1:i+2]
if ch2 == '$':
i += 1
j = i + 1
List = []
first_run = True
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"needed '$}', found end.")
if argstr[i:i+2] == '$}':
j = i+2
break
if not first_run:
if argstr[i:i+1] == ',':
i += 1
else:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected: ','")
else:
first_run = False
item = []
j = self.item(argstr, i, item) #@@@@@ should be path, was object
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected item in set or '$}'")
List.append(self._store.intern(item[0]))
res.append(self._store.newSet(List, self._context))
return j
else:
j = i + 1
oldParentContext = self._parentContext
self._parentContext = self._context
parentAnonymousNodes = self._anonymousNodes
grandParentVariables = self._parentVariables
self._parentVariables = self._variables
self._anonymousNodes = {}
self._variables = self._variables.copy()
reason2 = self._reason2
self._reason2 = becauseSubexpression
if subj is None:
subj = self._store.newFormula()
self._context = subj
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "needed '}', found end.")
if argstr[i:i+1] == "}":
j = i+1
break
j = self.directiveOrStatement(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "expected statement or '}'")
self._anonymousNodes = parentAnonymousNodes
self._variables = self._parentVariables
self._parentVariables = grandParentVariables
self._context = self._parentContext
self._reason2 = reason2
self._parentContext = oldParentContext
res.append(subj.close()) # No use until closed
return j
if ch == "(":
thing_type = self._store.newList
ch2 = argstr[i+1:i+2]
if ch2 == '$':
thing_type = self._store.newSet
i += 1
j = i+1
List = []
while 1:
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines,
argstr, i, "needed ')', found end.")
if argstr[i:i+1] == ')':
j = i+1
break
item = []
j = self.item(argstr, i, item) #@@@@@ should be path, was object
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"expected item in list or ')'")
List.append(self._store.intern(item[0]))
res.append(thing_type(List, self._context))
return j
j = self.tok('this', argstr, i) # This context
if j >= 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Keyword 'this' was ancient N3. Now use @forSome and @forAll keywords.")
res.append(self._context)
return j
#booleans
j = self.tok('true', argstr, i)
if j >= 0:
res.append(True)
return j
j = self.tok('false', argstr, i)
if j >= 0:
res.append(False)
return j
if subj is None: # If this can be a named node, then check for a name.
j = self.uri_ref2(argstr, i, res)
if j >= 0:
return j
return -1
def property_list(self, argstr, i, subj):
"""Parse property list
Leaves the terminating punctuation in the buffer
"""
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF found when expected verb in property list")
return j #eof
if argstr[j:j+2] == ":-":
i = j + 2
res = []
j = self.node(argstr, i, res, subj)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad {} or () or [] node after :- ")
i = j
continue
i = j
v = []
j = self.verb(argstr, i, v)
if j <= 0:
return i # void but valid
objs = []
i = self.objectList(argstr, j, objs)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"objectList expected")
for obj in objs:
dira, sym = v[0]
if dira == '->':
self.makeStatement((self._context, sym, subj, obj))
else:
self.makeStatement((self._context, sym, obj, subj))
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found in list of objects")
return j #eof
if argstr[i:i+1] != ";":
return i
i = i+1 # skip semicolon and continue
def commaSeparatedList(self, argstr, j, res, what):
"""return value: -1 bad syntax; >1 new position in argstr
res has things found appended
"""
i = self.skipSpace(argstr, j)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"EOF found expecting comma sep list")
return i
if argstr[i] == ".":
return j # empty list is OK
i = what(argstr, i, res)
if i < 0:
return -1
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
return j # eof
ch = argstr[j:j+1]
if ch != ",":
if ch != ".":
return -1
return j # Found but not swallowed "."
i = what(argstr, j+1, res)
if i < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad list content")
return i
def objectList(self, argstr, i, res):
i = self.object(argstr, i, res)
if i < 0:
return -1
while 1:
j = self.skipSpace(argstr, i)
if j < 0:
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"EOF found after object")
return j #eof
if argstr[j:j+1] != ",":
return j # Found something else!
i = self.object(argstr, j+1, res)
if i < 0:
return i
def checkDot(self, argstr, i):
j = self.skipSpace(argstr, i)
if j < 0:
return j #eof
if argstr[j:j+1] == ".":
return j+1 # skip
if argstr[j:j+1] == "}":
return j # don't skip it
if argstr[j:j+1] == "]":
return j
raise BadSyntax(self._thisDoc, self.lines,
argstr, j, "expected '.' or '}' or ']' at end of statement")
return i
def uri_ref2(self, argstr, i, res):
"""Generate uri from n3 representation.
Note that the RDF convention of directly concatenating
NS and local name is now used though I prefer inserting a '#'
        to make the namespaces look more like what XML folks expect.
"""
qn = []
j = self.qname(argstr, i, qn)
if j >= 0:
pfx, ln = qn[0]
if pfx is None:
assert 0, "not used?"
ns = self._baseURI + ADDED_HASH
else:
try:
ns = self._bindings[pfx]
except KeyError:
if pfx == "_": # Magic prefix 2001/05/30, can be overridden
res.append(self.anonymousNode(ln))
return j
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Prefix \"%s:\" not bound" % (pfx))
symb = self._store.newSymbol(ns + ln)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb) # @@@ "#" CONVENTION
if not ns.find("#"):
progress("Warning: no # on namespace %s," % ns)
return j
i = self.skipSpace(argstr, i)
if i < 0:
return -1
if argstr[i] == "?":
v = []
j = self.variable(argstr, i, v)
            if j > 0: # Forget variables as a class, only in context.
res.append(v[0])
return j
return -1
elif argstr[i] == "<":
i = i + 1
st = i
while i < len(argstr):
if argstr[i] == ">":
                    uref = argstr[st:i] # the join should deal with "":
if self._baseURI:
uref = join(self._baseURI, uref) # was: uripath.join
else:
assert ":" in uref, \
"With no base URI, cannot deal with relative URIs"
if argstr[i-1:i] == "#" and not uref[-1:] == "#":
uref = uref + "#" # She meant it! Weirdness in urlparse?
symb = self._store.newSymbol(uref)
if symb in self._variables:
res.append(self._variables[symb])
else:
res.append(symb)
return i+1
i = i + 1
raise BadSyntax(self._thisDoc, self.lines, argstr, j,
"unterminated URI reference")
elif self.keywordsSet:
v = []
j = self.bareWord(argstr, i, v)
if j < 0:
                return -1 # Forget variables as a class, only in context.
if v[0] in self.keywords:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
'Keyword "%s" not allowed here.' % v[0])
res.append(self._store.newSymbol(self._bindings[""]+v[0]))
return j
else:
return -1
def skipSpace(self, argstr, i):
"""Skip white space, newlines and comments.
return -1 if EOF, else position of first non-ws character"""
while 1:
m = eol.match(argstr, i)
if m == None:
break
self.lines = self.lines + 1
i = m.end() # Point to first character unmatched
self.startOfLine = i
m = ws.match(argstr, i)
if m != None:
i = m.end()
m = eof.match(argstr, i)
if m != None:
return -1
return i
def variable(self, argstr, i, res):
""" ?abc -> variable(:abc)
"""
j = self.skipSpace(argstr, i)
if j < 0:
return -1
if argstr[j:j+1] != "?":
return -1
j = j + 1
i = j
if argstr[j] in "0123456789-":
            raise BadSyntax(self._thisDoc, self.lines, argstr, j,
                            "Variable name can't start with '%s'" % argstr[j])
return -1
while i < len(argstr) and argstr[i] not in _notNameChars:
i = i+1
if self._parentContext == None:
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._variables:
self._variables[varURI] = self._context.newUniversal(varURI
, why=self._reason2)
res.append(self._variables[varURI])
return i
# @@ was:
# raise BadSyntax(self._thisDoc, self.lines, argstr, j,
# "Can't use ?xxx syntax for variable in outermost level: %s"
# % argstr[j-1:i])
varURI = self._store.newSymbol(self._baseURI + "#" + argstr[j:i])
if varURI not in self._parentVariables:
self._parentVariables[varURI] = self._parentContext.newUniversal(varURI
, why=self._reason2)
res.append(self._parentVariables[varURI])
return i
def bareWord(self, argstr, i, res):
""" abc -> :abc
"""
j = self.skipSpace(argstr, i)
if j < 0:
return -1
if argstr[j] in "0123456789-" or argstr[j] in _notNameChars:
return -1
i = j
while i < len(argstr) and argstr[i] not in _notNameChars:
i = i+1
res.append(argstr[j:i])
return i
def qname(self, argstr, i, res):
"""
xyz:def -> ('xyz', 'def')
If not in keywords and keywordsSet: def -> ('', 'def')
:def -> ('', 'def')
"""
i = self.skipSpace(argstr, i)
if i < 0:
return -1
c = argstr[i]
if c in "0123456789-+.":
return -1
if c not in _notNameChars:
ln = c
i = i + 1
while i < len(argstr):
c = argstr[i]
if c=="." or c not in _notNameChars:
ln = ln + c
i = i + 1
else: break
if argstr[i-1]==".": # qname cannot end with "."
return -1
else: # First character is non-alpha
ln = '' # Was: None - TBL (why? useful?)
if i < len(argstr) and argstr[i] == ':':
pfx = ln
i = i + 1
ln = ''
while i < len(argstr):
c = argstr[i]
if c not in _notNameChars:
ln = ln + c
i = i + 1
else:
break
res.append((pfx, ln))
return i
else: # delimiter was not ":"
if ln and self.keywordsSet and ln not in self.keywords:
res.append(('', ln))
return i
return -1
def object(self, argstr, i, res):
j = self.subject(argstr, i, res)
if j >= 0:
return j
else:
j = self.skipSpace(argstr, i)
if j < 0:
return -1
else:
i = j
if argstr[i] == '"':
if argstr[i:i+3] == '"""':
delim = '"""'
else:
delim = '"'
i = i + len(delim)
j, s = self.strconst(argstr, i, delim)
res.append(self._store.newLiteral(s))
progress("New string const ", s, j)
return j
else:
return -1
def nodeOrLiteral(self, argstr, i, res):
j = self.node(argstr, i, res)
startline = self.lines # Remember where for error messages
if j >= 0:
return j
else:
j = self.skipSpace(argstr, i)
if j < 0:
return -1
else:
i = j
ch = argstr[i]
if ch in "-+0987654321":
m = number_syntax.match(argstr, i)
if m == None:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"Bad number syntax")
j = m.end()
if m.group('exponent') != None: # includes decimal exponent
res.append(float(argstr[i:j]))
# res.append(self._store.newLiteral(argstr[i:j],
# self._store.newSymbol(FLOAT_DATATYPE)))
elif m.group('decimal') != None:
res.append(Decimal(argstr[i:j]))
else:
res.append(long(argstr[i:j]))
# res.append(self._store.newLiteral(argstr[i:j],
# self._store.newSymbol(INTEGER_DATATYPE)))
return j
if argstr[i] == '"':
if argstr[i:i+3] == '"""':
delim = '"""'
else:
delim = '"'
i = i + len(delim)
dt = None
j, s = self.strconst(argstr, i, delim)
lang = None
if argstr[j:j+1] == "@": # Language?
m = langcode.match(argstr, j+1)
if m == None:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"Bad language code syntax on string literal, after @")
i = m.end()
lang = argstr[j+1:i]
j = i
if argstr[j:j+2] == "^^":
res2 = []
j = self.uri_ref2(argstr, j+2, res2) # Read datatype URI
dt = res2[0]
# if dt.uriref() == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
if dt == "http://www.w3.org/1999/02/22-rdf-syntax-ns#XMLLiteral":
try:
dom = XMLtoDOM('<rdf:envelope xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns">'
+ s
+ '</rdf:envelope>').firstChild
except:
raise ValueError('s="%s"' % s)
res.append(self._store.newXMLLiteral(dom))
return j
res.append(self._store.newLiteral(s, dt, lang))
return j
else:
return -1
def uriOf(self, sym):
if isinstance(sym, types.TupleType):
return sym[1] # old system for --pipe
# return sym.uriref() # cwm api
return sym
def strconst(self, argstr, i, delim):
"""parse an N3 string constant delimited by delim.
return index, val
"""
j = i
ustr = u"" # Empty unicode string
startline = self.lines # Remember where for error messages
while j < len(argstr):
if argstr[j] == '"':
if delim == '"': # done when delim is "
i = j + 1
return i, ustr
if delim == '"""': # done when delim is """ and ...
if argstr[j:j+5] == '"""""': # ... we have "" before
i = j + 5
ustr = ustr + '""'
return i, ustr
if argstr[j:j+4] == '""""': # ... we have " before
i = j + 4
ustr = ustr + '"'
return i, ustr
if argstr[j:j+3] == '"""': # ... current " is part of delim
i = j + 3
return i, ustr
# we are inside of the string and current char is "
j = j + 1
ustr = ustr + '"'
continue
m = interesting.search(argstr, j) # was argstr[j:].
# Note for pos param to work, MUST be compiled ... re bug?
assert m, "Quote expected in string at ^ in %s^%s" % (
argstr[j-20:j], argstr[j:j+20]) # we at least have to find a quote
i = m.start()
try:
ustr = ustr + argstr[j:i]
except UnicodeError:
err = ""
for c in argstr[j:i]:
err = err + (" %02x" % ord(c))
streason = sys.exc_info()[1].__str__()
raise BadSyntax(self._thisDoc, startline, argstr, j,
"Unicode error appending characters %s to string, because\n\t%s"
% (err, streason))
# print "@@@ i = ",i, " j=",j, "m.end=", m.end()
ch = argstr[i]
if ch == '"':
j = i
continue
elif ch == "\r": # Strip carriage returns
j = i+1
continue
elif ch == "\n":
if delim == '"':
raise BadSyntax(self._thisDoc, startline, argstr, i,
"newline found in string literal")
self.lines = self.lines + 1
ustr = ustr + ch
j = i + 1
self.startOfLine = j
elif ch == "\\":
j = i + 1
ch = argstr[j:j+1] # Will be empty if string ends
if not ch:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal (2)")
k = 'abfrtvn\\"'.find(ch)
if k >= 0:
uch = '\a\b\f\r\t\v\n\\"'[k]
ustr = ustr + uch
j = j + 1
elif ch == "u":
j, ch = self.uEscape(argstr, j+1, startline)
ustr = ustr + ch
elif ch == "U":
j, ch = self.UEscape(argstr, j+1, startline)
ustr = ustr + ch
else:
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"bad escape")
raise BadSyntax(self._thisDoc, self.lines, argstr, i,
"unterminated string literal")
def uEscape(self, argstr, i, startline):
j = i
count = 0
value = 0
while count < 4: # Get 4 more characters
ch = argstr[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal(3)")
k = "0123456789abcdef".find(ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"bad string literal hex escape")
value = value * 16 + k
count = count + 1
uch = unichr(value)
return j, uch
def UEscape(self, argstr, i, startline):
stringType = type('')
j = i
count = 0
value = '\\U'
while count < 8: # Get 8 more characters
ch = argstr[j:j+1].lower()
# sbp http://ilrt.org/discovery/chatlogs/rdfig/2002-07-05
j = j + 1
if ch == "":
raise BadSyntax(self._thisDoc, startline, argstr, i,
"unterminated string literal(3)")
k = "0123456789abcdef".find(ch)
if k < 0:
raise BadSyntax(self._thisDoc, startline, argstr, i,
"bad string literal hex escape")
value = value + ch
count = count + 1
uch = stringType(value).decode('unicode-escape')
return j, uch
wide_build = True
try:
unichr(0x10000)
except ValueError:
wide_build = False
# If we are going to do operators then they should generate
# [ is operator:plus of ( \1 \2 ) ]
class BadSyntax(SyntaxError):
def __init__(self, uri, lines, argstr, i, why):
self._str = argstr.encode('utf-8') # Better go back to strings for errors
self._i = i
self._why = why
self.lines = lines
self._uri = uri
def __str__(self):
argstr = self._str
i = self._i
st = 0
if i > 60:
pre = "..."
st = i - 60
else:
pre = ""
if len(argstr)-i > 60:
post = "..."
else:
post = ""
return 'at line %i of <%s>:\nBad syntax (%s) at ^ in:\n"%s%s^%s%s"' \
% (self.lines +1, self._uri, self._why, pre,
argstr[st:i], argstr[i:i+60], post)
def stripCR(argstr):
res = ""
for ch in argstr:
if ch != "\r":
res = res + ch
return res
def dummyWrite(x):
pass
################################################################################
def toBool(s):
if s == 'true' or s == 'True' or s == '1':
return True
if s == 'false' or s == 'False' or s == '0':
return False
raise ValueError(s)
class Formula(object):
number = 0
def __init__(self, parent):
self.counter = 0
Formula.number += 1
self.number = Formula.number
self.existentials = {}
self.universals = {}
self.quotedgraph = QuotedGraph(
store=parent.store, identifier=self.id())
def __str__(self):
return '_:Formula%s' % self.number
def id(self):
return BNode('_:Formula%s' % self.number)
def newBlankNode(self, uri=None, why=None):
if uri is None:
self.counter += 1
bn = BNode('f%sb%s' % (id(self), self.counter))
else:
bn = BNode(uri.split('#').pop().replace('_', 'b'))
return bn
def newUniversal(self, uri, why=None):
return Variable(uri.split('#').pop())
def declareExistential(self, x):
self.existentials[x] = self.newBlankNode()
def close(self):
return self.quotedgraph
r_hibyte = re.compile(r'([\x80-\xff])')
def iri(uri):
return uri.decode('utf-8')
# return unicode(r_hibyte.sub(lambda m: '%%%02X' % ord(m.group(1)), uri))
class RDFSink(object):
def __init__(self, graph):
self.rootFormula = None
self.counter = 0
self.graph = graph
def newFormula(self):
assert self.graph.store.formula_aware
f = Formula(self.graph)
return f
def newSymbol(self, *args):
uri = args[0].encode('utf-8')
return URIRef(iri(uri))
def newBlankNode(self, arg=None, **kargs):
if isinstance(arg, Formula):
return arg.newBlankNode()
elif arg is None:
self.counter += 1
bn = BNode('n' + str(self.counter))
else:
bn = BNode(str(arg[0]).split('#').pop().replace('_', 'b'))
return bn
def newLiteral(self, s, dt, lang):
if dt:
return Literal(s, datatype=dt)
else:
return Literal(s, lang=lang)
def newList(self, n, f):
if not n:
return self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#nil'
)
a = self.newBlankNode(f)
first = self.newSymbol(
'http://www.w3.org/1999/02/22-rdf-syntax-ns#first'
)
rest = self.newSymbol('http://www.w3.org/1999/02/22-rdf-syntax-ns#rest')
self.makeStatement((f, first, a, n[0]))
self.makeStatement((f, rest, a, self.newList(n[1:], f)))
return a
def newSet(self, *args):
return set(args)
def setDefaultNamespace(self, *args):
return ':'.join(repr(n) for n in args)
def makeStatement(self, quadruple, why=None):
f, p, s, o = quadruple
if hasattr(p, 'formula'):
raise Exception("Formula used as predicate")
s = self.normalise(f, s)
p = self.normalise(f, p)
o = self.normalise(f, o)
if f == self.rootFormula:
# print s, p, o, '.'
self.graph.add((s, p, o))
else:
f.quotedgraph.add((s, p, o))
#return str(quadruple)
def normalise(self, f, n):
if isinstance(n, tuple):
return URIRef(unicode(n[1]))
# if isinstance(n, list):
# rdflist, f = n
# name = self.newBlankNode()
# if f == self.rootFormula:
# sublist = name
# for i in xrange(0, len(rdflist) - 1):
# print sublist, 'first', rdflist[i]
# rest = self.newBlankNode()
# print sublist, 'rest', rest
# sublist = rest
# print sublist, 'first', rdflist[-1]
# print sublist, 'rest', 'nil'
# return name
if isinstance(n, bool):
s = Literal(str(n).lower(), datatype=BOOLEAN_DATATYPE)
return s
if isinstance(n, int) or isinstance(n, long):
s = Literal(unicode(n), datatype=INTEGER_DATATYPE)
return s
if isinstance(n, Decimal):
value = str(n.normalize())
if value == '-0':
value = '0'
s = Literal(value, datatype=DECIMAL_DATATYPE )
return s
if isinstance(n, float):
s = Literal(str(n), datatype=DOUBLE_DATATYPE )
return s
if f.existentials.has_key(n):
return f.existentials[n]
# if isinstance(n, Var):
# if f.universals.has_key(n):
# return f.universals[n]
# f.universals[n] = f.newBlankNode()
# return f.universals[n]
return n
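    # Illustrative sketch (not from the original source): how normalise maps
    # plain Python values onto typed RDF literals. The formula argument is only
    # consulted for existentials, so any valid Formula f would do here:
    #
    #   sink.normalise(f, True)  # -> Literal('true', datatype=BOOLEAN_DATATYPE)
    #   sink.normalise(f, 42)    # -> Literal(u'42', datatype=INTEGER_DATATYPE)
    #   sink.normalise(f, 1.5)   # -> Literal('1.5', datatype=DOUBLE_DATATYPE)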
def intern(self, something):
return something
def bind(self, pfx, uri):
pass # print pfx, ':', uri
def startDoc(self, formula):
self.rootFormula = formula
def endDoc(self, formula):
pass
###################################################
#
# Utilities
#
Escapes = {'a': '\a',
'b': '\b',
'f': '\f',
'r': '\r',
't': '\t',
'v': '\v',
'n': '\n',
'\\': '\\',
'"': '"'}
forbidden1 = re.compile(ur'[\\\"\a\b\f\r\v\u0080-\U0000ffff]')
forbidden2 = re.compile(ur'[\\\"\a\b\f\r\v\t\n\u0080-\U0000ffff]')
#"
def stringToN3(argstr, singleLine=0, flags=""):
res = ''
if (len(argstr) > 20 and argstr[-1] != '"' \
and not singleLine and (argstr.find("\n") >= 0 \
or argstr.find('"') >= 0)):
delim = '"""'
forbidden = forbidden1 # (allow tabs too now)
else:
delim = '"'
forbidden = forbidden2
i = 0
while i < len(argstr):
m = forbidden.search(argstr, i)
if not m:
break
j = m.start()
res = res + argstr[i:j]
ch = m.group(0)
if ch == '"' and delim == '"""' and argstr[j:j+3] != '"""': #"
res = res + ch
else:
k = '\a\b\f\r\t\v\n\\"'.find(ch)
if k >= 0:
res = res + "\\" + 'abfrtvn\\"'[k]
else:
if 'e' in flags:
# res = res + ('\\u%04x' % ord(ch))
res = res + ('\\u%04X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
res = res + ch
i = j + 1
# The following code fixes things for really high range Unicode
newstr = ""
for ch in res + argstr[i:]:
if ord(ch)>65535:
newstr = newstr + ('\\U%08X' % ord(ch))
# http://www.w3.org/TR/rdf-testcases/#ntriples
else:
newstr = newstr + ch
return delim + newstr + delim
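# Illustrative sketch (not from the original source) of stringToN3's delimiter
# choice and escaping; the sample strings are assumptions, not recorded output
# (\n below denotes a real newline in the input):
#
#   stringToN3("hello")                            # -> '"hello"'
#   stringToN3("line one\nline two and more text") # -> '"""line one\nline two and more text"""'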
def backslashUify(ustr):
"""Use URL encoding to return an ASCII string corresponding
to the given unicode"""
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
s = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 65535:
ch = "\\U%08X" % ord(ch)
elif ord(ch) > 126:
ch = "\\u%04X" % ord(ch)
else:
ch = "%c" % ord(ch)
s = s + ch
return b(s)
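# Illustrative sketch (not from the original source): backslashUify(u"caf\xe9")
# returns the ASCII string "caf\\u00E9", i.e. the e-acute becomes the
# six-character escape backslash, 'u', '0', '0', 'E', '9'.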
@py3compat.format_doctest_out
def hexify(ustr):
"""Use URL encoding to return an ASCII string
corresponding to the given UTF8 string
>>> hexify("http://example/a b")
%(b)s'http://example/a%%20b'
"""
# progress("String is "+`ustr`)
# s1=ustr.encode('utf-8')
s = ""
for ch in ustr: # .encode('utf-8'):
if ord(ch) > 126 or ord(ch) < 33 :
ch = "%%%02X" % ord(ch)
else:
ch = "%c" % ord(ch)
s = s + ch
return b(s)
# # Unused, dysfunctional.
# def dummy():
# res = ""
# if len(argstr) > 20 and (argstr.find("\n") >=0 or argstr.find('"') >=0):
# delim= '"""'
# forbidden = "\\\"\a\b\f\r\v" # (allow tabs too now)
# else:
# delim = '"'
# forbidden = "\\\"\a\b\f\r\v\t\n"
# for i in range(len(argstr)):
# ch = argstr[i]
# j = forbidden.find(ch)
# if ch == '"' and delim == '"""' \
# and i+1 < len(argstr) and argstr[i+1] != '"':
# j=-1 # Single quotes don't need escaping in long format
# if j >= 0:
# ch = "\\" + '\\"abfrvtn'[j]
# elif ch not in "\n\t" and (ch < " " or ch > "}"):
# ch = "[[" + `ch` + "]]" #[2:-1] # Use python
# res = res + ch
# return delim + res + delim
class N3Parser(Parser):
def __init__(self):
pass
def parse(self, source, graph, encoding="utf-8"):
# we're currently being handed a Graph, not a ConjunctiveGraph
assert graph.store.context_aware # is this implied by formula_aware
assert graph.store.formula_aware
if encoding not in [None, "utf-8"]:
raise Exception("N3 files are always utf-8 encoded, I was passed: %s"%encoding)
conj_graph = ConjunctiveGraph(store=graph.store)
conj_graph.default_context = graph # TODO: CG __init__ should have a default_context arg
# TODO: update N3Processor so that it can use conj_graph as the sink
conj_graph.namespace_manager = graph.namespace_manager
sink = RDFSink(conj_graph)
baseURI = graph.absolutize(source.getPublicId() or source.getSystemId() or "")
p = SinkParser(sink, baseURI=baseURI)
p.loadStream(source.getByteStream())
for prefix, namespace in p._bindings.items():
conj_graph.bind(prefix, namespace)
def _test():
import doctest
doctest.testmod()
# if __name__ == '__main__':
# _test()
def main():
g = ConjunctiveGraph()
sink = RDFSink(g)
base_uri = 'file://' + os.path.join(os.getcwd(), sys.argv[1])
p = SinkParser(sink, baseURI=base_uri)
p._bindings[''] = p._baseURI + '#'
p.startDoc()
f = open(sys.argv[1], 'rb')
rdbytes = f.read()
f.close()
p.feed(rdbytes)
p.endDoc()
for t in g.quads((None, None, None)):
print t
if __name__ == '__main__':
main()
#ends
| bsd-3-clause | -6,180,058,760,584,588,000 | 31.715589 | 290 | 0.48554 | false | 3.804175 | false | false | false |
ieugen/Teachingbox | usercontrib/crawler3D/python/pyui/grid.py | 1 | 8378 | # PyUI
# Copyright (C) 2001-2002 Sean C. Riley
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""A scrollable grid class for PyUI. The elements in the grid are PyUI widgets.
"""
import pyui
import copy
from pyui.desktop import getDesktop, getTheme, getRenderer
class GridPanel(pyui.widgets.Panel):
"""A scrollable grid class. I have a grid of cells of which only some will
be visible at any time.
"""
def __init__(self, visibleWidth, visibleHeight, useColumnHeaders = 1, useRowHeaders = 1):
self.vWidth = visibleWidth
self.vHeight = visibleHeight
self.scrollPos = 0
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.BorderLayoutManager())
self.cheader = ColumnHeaders(visibleWidth)
self.rheader = RowHeaders(visibleHeight)
self.scrollBar = pyui.widgets.VScroll()
self.scrollBar.resize(10, 50)
self.cellPanel = CellPanel(visibleWidth, visibleHeight)
if useColumnHeaders:
self.addChild(self.cheader, pyui.layouts.BorderLayoutManager.NORTH)
if useRowHeaders:
self.addChild(self.rheader, pyui.layouts.BorderLayoutManager.WEST)
self.addChild(self.cellPanel, pyui.layouts.BorderLayoutManager.CENTER)
self.addChild(self.scrollBar, pyui.layouts.BorderLayoutManager.EAST)
self.pack()
def resize(self, w, h):
print "Resizing GridPanel", w, h
pyui.widgets.Panel.resize(self, w, h)
def setColumnName(self, columnNum, name):
self.cheader.setColumnName(columnNum, name)
def setRowName(self, rowNum, name):
self.rheader.setRowName(rowNum, name)
def getCellAt(self, x, y):
"""return a cell at the co-ordinates.
"""
return self.cellPanel.getCellAt(x, y)
def putCellAt(self, widget, x, y):
"""put a widget into the grid at the co-ordinates.
"""
return self.cellPanel.putCellAt(widget, x, y)
def removeCellAt(self, x, y):
"""remove a widget from the grid
"""
return self.cellPanel.removeCellAt(x, y)
def findCellAt(self, posX, posY):
"""Find the cell at the x,y pixel position. Pass-through to the inner grid panel.
"""
return self.cellPanel.findCellAt(posX, posY)
def findCoordinatesAt(self, posX, posY):
"""convert screen co-ordinates into grid co-ordinates.
"""
return self.cellPanel.findCoordinatesAt(posX, posY)
def clear(self):
return self.cellPanel.clear()
class CellPanel(pyui.widgets.Panel):
"""The inner cell grid of a GridPanel.
"""
def __init__(self, vWidth, vHeight):
pyui.widgets.Panel.__init__(self)
self.vWidth = float(vWidth)
self.vHeight = float(vHeight)
self.cells = {}
self.scrollPos = 0
self.cellWidth = 1
self.cellHeight = 1
self.numRows = vHeight
self.registerEvent(pyui.locals.SCROLLPOS, self.onScroll)
def resize(self, width, height):
pyui.widgets.Panel.resize(self, width, height)
self.cellWidth = self.windowRect[2] / self.vWidth
self.cellHeight = self.windowRect[3] / self.vHeight
self.setupAllCells()
def setupAllCells(self):
for key in self.cells.keys():
if key[1] >= self.scrollPos and key[1] < self.scrollPos + self.vHeight:
self.setupCell( self.cells[key], key[0], key[1])
self.cells[key].setShow(1)
else:
self.cells[key].setShow(0)
def getCellAt(self, x, y):
return self.cells.get( (x,y), None)
def removeCellAt(self, x, y):
cell = self.cells.get( (x,y), None)
if cell:
cell.destroy()
self.children.remove(cell)
del self.cells[ (x,y) ]
self.setDirty(1)
def clear(self):
tmp = copy.copy(self.children)
for cell in tmp:
self.removeCellAt( cell.gridPosition[0], cell.gridPosition[1] )
def putCellAt(self, widget, x, y):
if self.cells.has_key( (x,y) ):
print "Error: already a widget at (%s,%s)" % (x,y)
return 0
self.addChild(widget)
self.cells[ (x,y) ] = widget
self.setupCell(widget, x, y)
if y > self.numRows:
self.numRows = y + 1
self.parent.scrollBar.setNumItems(y+1, self.vHeight)
return 1
def setupCell(self, widget, x, y):
"""this moves and positions the cell. it also sets "gridPosition" so the cell
knows where in the grid it lives.
"""
if y >= self.scrollPos and y < self.scrollPos + self.vHeight:
widget.setShow(1)
else:
widget.setShow(0)
#print "setup cell", x, y
widget.gridPosition = (x,y)
widget.moveto( self.cellWidth * x + 2,
self.cellHeight * (y-self.scrollPos) + 2)
widget.resize( self.cellWidth -4, self.cellHeight -4)
def onScroll(self, event):
if event.id == self.parent.scrollBar.id:
self.scrollPos = event.pos
self.setupAllCells()
self.setDirty(1)
self.window.setDirty(1)
return 1
return 0
def findCellAt(self, posX, posY):
"""find the cell at x,y
"""
x = int((posX - self.rect[0]) / self.cellWidth)
y = int((posY - self.rect[1]) / self.cellHeight) + self.scrollPos
return self.cells.get( (x,y), None)
def findCoordinatesAt(self, posX, posY):
x = int((posX - self.rect[0]) / self.cellWidth)
y = int((posY - self.rect[1]) / self.cellHeight) + self.scrollPos
return (x,y)
def draw(self, renderer):
"""only draw the visible widgets.
"""
for key in self.cells.keys():
if key[1] >= self.scrollPos and key[1] < self.scrollPos + self.vHeight:
self.cells[key].draw(renderer)
xpos = self.windowRect[0]
ypos = self.windowRect[1]
w = self.windowRect[2]
h = self.windowRect[3]
cellw = w / self.vWidth
cellh = h / self.vHeight
        # vWidth/vHeight are stored as floats, so cast to int before using them as range bounds
        for x in range(0, int(self.vWidth) + 1):
            renderer.drawLine(xpos + x * cellw, ypos, xpos + x * cellw, ypos + h, pyui.colors.white)
        for y in range(0, int(self.vHeight)):
            renderer.drawLine(xpos, ypos + y * cellh, xpos + w, ypos + y * cellh, pyui.colors.white)
class ColumnHeaders(pyui.widgets.Panel):
"""The column headers for the GridPanel.
"""
def __init__(self, numColumns):
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.TableLayoutManager(numColumns, 1))
for i in range(0, numColumns):
self.addChild( pyui.widgets.Button("---"), (i, 0, 1, 1) )
self.resize(self.rect[2], 22)
def setColumnName(self, columnNum, name):
self.children[columnNum].setText(name)
class RowHeaders(pyui.widgets.Panel):
"""The row headers for the GridPanel.
"""
def __init__(self, numRows):
pyui.widgets.Panel.__init__(self)
self.setLayout(pyui.layouts.TableLayoutManager(1, numRows) )
for i in range(0, numRows):
self.addChild( pyui.widgets.Button("%d" % i), (0, i, 1, 1) )
self.resize(22, self.rect[3])
def setRowName(self, rowNum, name):
self.children[rowNum].setText(name)
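# Illustrative usage sketch (not part of the original module). It assumes the
# host application has already initialised a pyui desktop/window to attach the
# panel to; only the GridPanel API defined above is exercised.
#
#   grid = GridPanel(visibleWidth=4, visibleHeight=5)
#   grid.setColumnName(0, "Name")
#   grid.putCellAt(pyui.widgets.Button("cell 0,0"), 0, 0)
#   widget = grid.getCellAt(0, 0)
#   grid.removeCellAt(0, 0)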
| gpl-3.0 | -7,408,309,931,645,807,000 | 34.907489 | 100 | 0.584149 | false | 3.611207 | false | false | false |
mridang/django-eggnog | eggnog/management/commands/checkupdates.py | 1 | 2002 | from threading import Thread
from pkg_resources import *
from django.core.management.base import BaseCommand, CommandError
from yolk.setuptools_support import get_pkglist
from yolk.yolklib import get_highest_version, Distributions
from yolk.pypi import CheeseShop
from eggnog.models import Update
class Command(BaseCommand):
"""
Custom management command for checking for package updates.
"""
help = 'Checks for package updates from PyPi'
threads = []
dists = Distributions()
pypi = CheeseShop()
def __init__(self, *args, **kwargs):
"""
        Initializer for the management command; flushes stale update data.
"""
super(Command, self).__init__(*args, **kwargs)
Update.objects.all().delete()
def handle(self, *args, **options):
"""
Main management command method that starts the checking process.
"""
print "Checking for updates from PyPi"
for pkg in get_pkglist():
for (dist, active) in self.dists.get_distributions("all", pkg, self.dists.get_highest_installed(pkg)):
thread = Thread(target=self.__check_pypi, args=(dist.project_name, dist.version))
self.threads.append(thread)
thread.start()
for thread in self.threads:
thread.join()
def __check_pypi(self, name, current):
"""
Queries PyPi for updates
"""
(package, versions) = self.pypi.query_versions_pypi(name)
if versions:
newest = get_highest_version(versions)
if newest != current:
if parse_version(current) < parse_version(newest):
print " * Updates for %s are available. You have %s and the latest is %s." % (package, current, newest)
else:
print " * No updates are available for %s." % (package)
Update.objects.create(package=package, installed=current, available=newest)
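# Illustrative invocation (assumption, not from the original source): as a
# Django management command this module is typically run with
#
#   python manage.py checkupdates
#
# which clears previous Update rows and records fresh ones from the PyPI lookups.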
| bsd-3-clause | -1,709,283,128,330,770,400 | 32.366667 | 123 | 0.608891 | false | 4.259574 | false | false | false |
IdahoDataEngineers/vcardz | vcardz/data.py | 1 | 2577 | #
# Kontexa vCard data structure and processing
#
from email.utils import parseaddr
import re
from six.moves.urllib.parse import urlparse
from .atom import Atom
from .bag import Bag
from .utils import new_id
REX_BEGIN = "^BEGIN:VCARD"
REX_END = "END:VCARD$"
REX_PHONE_NUMBERS = "\+?1? *\(?([0-9]{3})\)?[-. ]?([0-9]{3})[-. ]?([0-9]{4})(?:[,x ]*)([0-9]*)" # noqa
REX_EMAIL = "[a-z0-9!#$%&'*+/=?^_`{|}~-]+(?:\.[a-z0-9!#$%&'*+/=?^_`{|}~-]+)*@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?" # noqa
class FormattedName(Atom):
pass
class Name(Bag):
pass
class Nickname(Atom):
pass
class Photo(Atom):
pass
class Birthday(Atom):
pass
class Email(Atom):
user = ""
domain = ""
def __init__(self, data):
Atom.__init__(self, data)
try:
self.value = self.value.lower()
# temp = re.match(Parser.REX_EMAIL, self.value)
# if not temp:
# self.tag = None
# self.value = None
# return
self.value = parseaddr(self.value)[1].lower()
frags = self.value.split('@')
self.user = frags[0]
self.domain = frags[1]
except IndexError:
pass
class Phone(Atom):
def __init__(self, data):
temp = re.sub('[^0-9]', '', data)
if not temp:
raise ValueError
Atom.__init__(self, data)
match = re.match(REX_PHONE_NUMBERS, self.value)
        if match is not None:
phone = match.group(1) + "-" + \
match.group(2) + "-" + \
match.group(3)
if "" != match.group(4):
phone += " x" + match.group(4)
self.value = phone
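# Illustrative sketch (not from the original source): the normalisation Phone
# applies via REX_PHONE_NUMBERS; the sample number is made up.
#
#   >>> m = re.match(REX_PHONE_NUMBERS, "+1 (208) 555-0100 x12")
#   >>> m.group(1), m.group(2), m.group(3), m.group(4)
#   ('208', '555', '0100', '12')
#
# Phone renders this value as "208-555-0100 x12".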
class Address(Bag):
pass
class Label(Bag):
pass
class Organization(Atom):
pass
class Role(Atom):
def __init__(self, data):
Atom.__init__(self, data)
if "- - -" == self.value:
self.tag = None
self.value = None
class Title(Atom):
pass
class Categories(Bag):
pass
class Note(Atom):
pass
class ProdID(Atom):
pass
class Rev(Atom):
pass
class SortString(Atom):
pass
class Url(Atom):
def __init__(self, data):
Atom.__init__(self, data)
o = urlparse(self.value)
if '' == o.scheme:
self.value = 'http://' + self.value
self.value = self.value.replace('http\://', '')
class Mailer(Atom):
pass
class Uid(Atom):
@staticmethod
def create():
return Uid("UID:kontexa;%s" % new_id())
| gpl-2.0 | 9,038,364,064,295,289,000 | 17.810219 | 155 | 0.503686 | false | 3.093637 | false | false | false |
aerkalov/Booktype | lib/booki/editor/management/commands/bookrename.py | 1 | 3217 | # This file is part of Booktype.
# Copyright (c) 2012 Aleksandar Erkalovic <[email protected]>
#
# Booktype is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Booktype is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Booktype. If not, see <http://www.gnu.org/licenses/>.
from django.core.management.base import BaseCommand, CommandError
from optparse import make_option
from django.contrib.auth.models import User
from booki.editor import common
from booki.editor import models
from django.conf import settings
class Command(BaseCommand):
args = "<book name>"
help = "Rename book."
option_list = BaseCommand.option_list + (
make_option('--owner',
action='store',
dest='owner',
default=None,
help='Set new owner of the book.'),
make_option('--new-book-title',
action='store',
dest='new_book_title',
default=None,
help='Set new book title.'),
make_option('--new-book-url',
action='store',
dest='new_book_url',
default=None,
help='Set new book url name.'),
)
requires_model_validation = False
def handle(self, *args, **options):
if len(args) != 1:
raise CommandError("You must specify book name.")
try:
book = models.Book.objects.get(url_title__iexact=args[0])
except models.Book.DoesNotExist:
raise CommandError('Book "%s" does not exist.' % args[0])
if options['new_book_title']:
book.title = options['new_book_title']
if options['new_book_url']:
import os
os.rename('%s/books/%s' % (settings.DATA_ROOT, book.url_title), '%s/books/%s' % (settings.DATA_ROOT, options['new_book_url']))
book.url_title = options['new_book_url']
# TODO: test this
n = len(settings.DATA_ROOT)+len('books/')+1
for attachment in models.Attachment.objects.filter(version__book=book):
name = attachment.attachment.name
j = name[n:].find('/')
newName = '%s/books/%s%s' % (settings.DATA_ROOT, book.url_title, name[n:][j:])
attachment.attachment.name = newName
attachment.save()
if options['owner']:
try:
user = User.objects.get(username=options['owner'])
except User.DoesNotExist:
raise CommandError('User "%s" does not exist. Can not finish import.' % options['owner'])
book.owner = user
book.save()
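# Illustrative invocation (assumption, not from the original source): as a
# Django management command this is typically run with, for example,
#
#   python manage.py bookrename old-book --new-book-title "New Title" \
#       --new-book-url new-book --owner admin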
| agpl-3.0 | 7,888,365,485,220,761,000 | 34.744444 | 138 | 0.588126 | false | 4.272244 | false | false | false |
idegtiarov/gnocchi-rep | gnocchi/ceilometer/utils.py | 1 | 1259 | #
# Copyright 2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
def get_keystone_client():
return ksclient.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
tenant_id=cfg.CONF.service_credentials.os_tenant_id,
tenant_name=cfg.CONF.service_credentials.os_tenant_name,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=cfg.CONF.service_credentials.os_auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure)
| apache-2.0 | -8,211,332,044,180,262,000 | 39.612903 | 75 | 0.746624 | false | 3.735905 | false | false | false |
mshunshin/SegNetCMR | pydicom/charset.py | 1 | 5681 | # charset.py
"""Handle alternate character sets for character strings."""
#
# Copyright (c) 2008-2012 Darcy Mason
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at https://github.com/darcymason/pydicom
#
from pydicom import compat
from pydicom.config import logger
from pydicom.valuerep import PersonNameUnicode, text_VRs
from pydicom.compat import in_py2
# Map DICOM Specific Character Set to python equivalent
python_encoding = {
'': 'iso8859', # default character set for DICOM
'ISO_IR 6': 'iso8859', # alias for latin_1 too
'ISO_IR 100': 'latin_1',
'ISO_IR 101': 'iso8859_2',
'ISO_IR 109': 'iso8859_3',
'ISO_IR 110': 'iso8859_4',
'ISO_IR 126': 'iso_ir_126', # Greek
'ISO_IR 127': 'iso_ir_127', # Arab
'ISO_IR 138': 'iso_ir_138', # Hebrew
'ISO_IR 144': 'iso_ir_144', # Russian
'ISO_IR 148': 'iso8859_5',
    # Thai 'ISO_IR 166': 'XXXXX', No idea what this maps to
'ISO 2022 IR 6': 'iso8859', # alias for latin_1 too
'ISO 2022 IR 13': 'shift_jis',
'ISO 2022 IR 87': 'iso2022_jp',
'ISO 2022 IR 100': 'latin_1',
'ISO 2022 IR 101': 'iso8859_2',
'ISO 2022 IR 109': 'iso8859_3',
'ISO 2022 IR 110': 'iso8859_4',
'ISO 2022 IR 126': 'iso_ir_126',
'ISO 2022 IR 127': 'iso_ir_127', # Arab
'ISO 2022 IR 138': 'iso_ir_138',
'ISO 2022 IR 144': 'iso_ir_144',
'ISO 2022 IR 148': 'iso8859_5',
'ISO 2022 IR 149': 'euc_kr', # needs cleanup via clean_escseq()
    # Japanese 'ISO 2022 IR 159': 'XXXX',
'ISO_IR 192': 'UTF8', # from Chinese example, 2008 PS3.5 Annex J p1-4
'GB18030': 'GB18030',
}
default_encoding = "iso8859"
def clean_escseq(element, encodings):
"""Remove escape sequences that Python does not remove from
Korean encoding ISO 2022 IR 149 due to the G1 code element.
"""
if 'euc_kr' in encodings:
return element.replace(
"\x1b\x24\x29\x43", "").replace("\x1b\x28\x42", "")
else:
return element
# DICOM PS3.5-2008 6.1.1 (p 18) says:
# default is ISO-IR 6 G0, equiv to common chr set of ISO 8859 (PS3.5 6.1.2.1)
# (0008,0005) value 1 can *replace* the default encoding...
# for VRs of SH, LO, ST, LT, PN and UT (PS3.5 6.1.2.3)...
# with a single-byte character encoding
# if (0008,0005) is multi-valued, then value 1 (or default if blank)...
# is used until code extension escape sequence is hit,
# which can be at start of string, or after CR/LF, FF, or
# in Person Name PN, after ^ or =
# NOTE also that 7.5.3 SEQUENCE INHERITANCE states that if (0008,0005)
# is not present in a sequence item then it is inherited from its parent.
def convert_encodings(encodings):
"""Converts DICOM encodings into corresponding python encodings"""
    # If a list is passed, we don't want to modify the list in place, so copy it
encodings = encodings[:]
if isinstance(encodings, compat.string_types):
encodings = [encodings]
elif not encodings[0]:
encodings[0] = 'ISO_IR 6'
try:
encodings = [python_encoding[x] for x in encodings]
except KeyError: # Assume that it is already the python encoding (is there a way to check this?)
pass
if len(encodings) == 1:
encodings = [encodings[0]] * 3
elif len(encodings) == 2:
encodings.append(encodings[1])
return encodings
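# Illustrative sketch (not from the original source) of the mapping performed
# by convert_encodings:
#
#   convert_encodings('ISO_IR 100')
#   # -> ['latin_1', 'latin_1', 'latin_1']
#   convert_encodings(['ISO 2022 IR 6', 'ISO 2022 IR 149'])
#   # -> ['iso8859', 'euc_kr', 'euc_kr']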
def decode(data_element, dicom_character_set):
"""Apply the DICOM character encoding to the data element
data_element -- DataElement instance containing a value to convert
dicom_character_set -- the value of Specific Character Set (0008,0005),
which may be a single value,
a multiple value (code extension), or
may also be '' or None.
If blank or None, ISO_IR 6 is used.
"""
if not dicom_character_set:
dicom_character_set = ['ISO_IR 6']
encodings = convert_encodings(dicom_character_set)
# decode the string value to unicode
    # PN is a special case as it may have 3 components with different character sets
if data_element.VR == "PN":
# logger.warn("%s ... type: %s" %(str(data_element), type(data_element.VR)))
if not in_py2:
if data_element.VM == 1:
data_element.value = data_element.value.decode(encodings)
else:
data_element.value = [val.decode(encodings) for val in data_element.value]
else:
if data_element.VM == 1:
data_element.value = PersonNameUnicode(data_element.value, encodings)
else:
data_element.value = [PersonNameUnicode(value, encodings)
for value in data_element.value]
if data_element.VR in text_VRs:
# Remove the first encoding if this is a multi-byte encoding
if len(encodings) > 1:
del encodings[0]
# You can't re-decode unicode (string literals in py3)
if data_element.VM == 1:
if isinstance(data_element.value, compat.text_type):
return
data_element.value = clean_escseq(
data_element.value.decode(encodings[0]), encodings)
else:
output = list()
for value in data_element.value:
if isinstance(value, compat.text_type):
output.append(value)
else:
output.append(clean_escseq(value.decode(encodings[0]), encodings))
data_element.value = output
| mit | 4,459,642,833,566,833,000 | 36.375 | 101 | 0.607816 | false | 3.487416 | false | false | false |
Krigu/python_fun | Heidi/FileParser.py | 1 | 1629 | TORCH_START_VALUE = 15
THING_START_VALUE = 20
INVISIBLE_START_VALUE = 21
FANTASTIC_START_VALUE = 3
class Story:
heros = ["Heidi", "Fantastic", "Tourch", "Thing", "Invisible"]
heidi = 0
fantastic = FANTASTIC_START_VALUE
torch = TORCH_START_VALUE
thing = THING_START_VALUE
invisible = INVISIBLE_START_VALUE
def act1_scene1(self):
self.fantastic = 1
self.invisible = INVISIBLE_START_VALUE
if self.fantastic == self.invisible:
self.act1_scene2()
else:
self.torch = 4
print(self.fantastic)
self.act1_scene2()
def act1_scene2(self):
self.thing = THING_START_VALUE
self.fantastic = 2
self.act1_scene3()
def act1_scene3(self):
if self.thing <= 1:
self.act1_scene4()
else:
self.fantastic = 4
self.thing -= 1
self.act1_scene3()
def act1_scene4(self):
self.invisible += self.fantastic / 2
self.torch -= 1
if self.thing <= self.torch:
self.act1_scene2()
else:
print(self.invisible)
self.act1_scene3()
def act2_scene1(self):
self.torch = 0
print(self.torch)
self.torch = TORCH_START_VALUE
self.act2_scene2()
def act2_scene2(self):
if self.torch % 2 == 1:
print(self.fantastic)
else:
self.thing = self.torch / 2
self.fantastic += 1
self.torch = self.thing
if self.fantastic <= 32:
self.act2_scene2()
Story().act1_scene1()
| gpl-3.0 | 7,884,140,570,094,559,000 | 23.313433 | 66 | 0.54205 | false | 3.351852 | false | false | false |
mahabs/nitro | nssrc/com/citrix/netscaler/nitro/resource/config/utility/techsupport_args.py | 1 | 1326 | #
# Copyright (c) 2008-2015 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class techsupport_args :
""" Provides additional arguments required for fetching the techsupport resource.
"""
def __init__(self) :
self._scope = ""
@property
def scope(self) :
"""Use this option to run showtechsupport on present node or all cluster nodes.<br/>Default value: NODE<br/>Possible values = NODE, CLUSTER.
"""
try :
return self._scope
except Exception as e:
raise e
@scope.setter
def scope(self, scope) :
"""Use this option to run showtechsupport on present node or all cluster nodes.<br/>Default value: NODE<br/>Possible values = NODE, CLUSTER
"""
try :
self._scope = scope
except Exception as e:
raise e
class Scope:
NODE = "NODE"
CLUSTER = "CLUSTER"
| apache-2.0 | 4,549,954,713,833,272,300 | 28.466667 | 142 | 0.706637 | false | 3.574124 | false | false | false |
cheapjack/MemoryCraft | MemoryCloud1.py | 1 | 1993 | #!/usr/bin/python
#Install the modules we need
#from pyfirmata import Arduino, util, INPUT
from mcpi import minecraft
from mcpi import minecraftstuff
from time import sleep
import server
import serial
# Set up a connection to the Arduino/Shrimp if we need it
#PORT = "/dev/tty.SLAB_USBtoUART"
#ser = serial.Serial(PORT, 9600)
# Connect to the server: we use the imported server.py to make it work with CloudMaker
mc = minecraft.Minecraft.create(server.address)
#Post a message to the minecraft chat window
mc.postToChat("Ready to read Memory!")
# Use the command /getpos or F3 in Minecraft client to find out where you are then use those
# x, y, z coordinates to build things
# translate CloudMaker coords for mcpi ones
# add this to x
mcx = 177
# - this from y
mcy = 64
# - this from z
mcz = 135
# Text Bubble 1
def MemoryCloud1(startx,starty,startz, chartwidth, chartheight, chartdepth, blocktype, blockid):
# Main Bubble
mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + chartwidth, (starty-mcy) + chartheight, (startz - mcz) + chartdepth, blocktype, blockid)
# inset bottom
mc.setBlocks((startx + mcx) + 1, (starty-mcy) - 1, (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) -1, (startz - mcz) + chartdepth, blocktype, blockid)
#inset top
mc.setBlocks((startx + mcx) + 1, (starty-mcy) + (chartheight + 1), (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) + (chartheight + 1), (startz - mcz) + chartdepth, blocktype, blockid)
# If you want to add a bubble diagram, insert your coordinates
# Then use /js blocktype("My Message", blockid) while facing the block where you want to write
#MemoryCloud1(-343, 75, -15, 44, 14, 2, 35, 0)
#MemoryCloud1(-343, 110, -15, 44, 14, 2, 35, 0)
#MemoryCloud1(-343, 75, -15, 44, 14, 2, 0)
#MemoryCloud1(-343, 100, -15, 44, 14, 2, 0)
# the memory cloud function is (myposx, myposy, myposz, width, height, thickness,
# blocktype, blockidoption)
MemoryCloud1(332, 100, -1185, 44, 4, 2, 35, 0)
#
| mit | 4,954,500,081,771,976,000 | 35.907407 | 197 | 0.707978 | false | 2.733882 | false | false | false |
Bobox214/ZemkaBot | tools/kbHit.py | 1 | 1619 | import sys
import termios
import atexit
from select import select
class KBHit(object):
def __init__(self):
'''Creates a KBHit object that you can call to do various keyboard things.
'''
# Save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# New terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
def set_normal_term(self):
''' Resets to normal terminal. On Windows this is a no-op.
'''
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def getch(self):
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
return sys.stdin.read(1)
def getarrow(self):
''' Returns an arrow-key code after kbhit() has been called. Codes are
0 : up
1 : right
2 : down
3 : left
Should not be called in the same program as getch().
'''
c = sys.stdin.read(3)[2]
vals = [65, 67, 66, 68]
return vals.index(ord(c.decode('utf-8')))
def kbhit(self):
''' Returns True if keyboard character was hit, False otherwise.
'''
dr,dw,de = select([sys.stdin], [], [], 0)
return dr != []
# Test
if __name__ == "__main__":
kb = KBHit()
print('Hit any key, or ESC to exit')
while True:
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
break
print(c)
kb.set_normal_term()
| mit | 9,086,489,840,359,184,000 | 22.128571 | 76 | 0.650401 | false | 2.922383 | false | false | false |
axce1/PyProjects | Graphics/watermark.py | 1 | 1104 | import Image, ImageEnhance
def add_watermark(image, watermark, opacity=1, wm_interval=None):
assert opacity >= 0 and opacity <= 1
if opacity < 1:
if watermark.mode != 'RGBA':
watermark = watermark.convert('RGBA')
else:
watermark = watermark.copy()
alpha = watermark.split()[3]
alpha = ImageEnhance.Brightness(alpha).enhance(opacity)
watermark.putalpha(alpha)
layer = Image.new('RGBA', image.size, (0,0,0,0))
if wm_interval:
for y in range(0, image.size[1], watermark.size[1]+wm_interval):
for x in range(0, image.size[0], watermark.size[0]+wm_interval):
layer.paste(watermark, (x, y))
else:
layer.paste(watermark, (0,image.size[0]))
return Image.composite(layer, image, layer)
if __name__ == "__main__":
import sys
if len(sys.argv) < 3:
print ('Usage: python watermark.py image-file watermark-image-file')
sys.exit(1)
img = Image.open(sys.argv[1])
wm = Image.open(sys.argv[2])
add_watermark(img, wm, 0.4, 100).save("image_wm.png")
| gpl-2.0 | 2,413,271,779,597,631,000 | 33.5 | 76 | 0.601449 | false | 3.266272 | false | false | false |
codingenesis/ansible_mysql_rds_playbook | hack/rds.py | 1 | 44216 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rds
version_added: "1.3"
short_description: create, delete, or modify an Amazon rds instance
description:
- Creates, deletes, or modifies rds instances. When creating an instance it can be either a new instance or a read-only replica of an existing instance. This module has a dependency on python-boto >= 2.5. The 'promote' command requires boto >= 2.18.0. Certain features such as tags rely on boto.rds2 (boto >= 2.26.0)
options:
command:
description:
- Specifies the action to take. The 'reboot' option is available starting at version 2.0
required: true
choices: [ 'create', 'replicate', 'delete', 'facts', 'modify' , 'promote', 'snapshot', 'reboot', 'restore' ]
instance_name:
description:
- Database instance identifier. Required except when using command=facts or command=delete on just a snapshot
required: false
default: null
source_instance:
description:
- Name of the database to replicate. Used only when command=replicate.
required: false
default: null
db_engine:
description:
- The type of database. Used only when command=create.
required: false
default: null
choices: [ 'MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres']
size:
description:
- Size in gigabytes of the initial storage for the DB instance. Used only when command=create or command=modify.
required: false
default: null
instance_type:
description:
- The instance type of the database. Must be specified when command=create. Optional when command=replicate, command=modify or command=restore. If not specified then the replica inherits the same instance type as the source instance.
required: false
default: null
username:
description:
- Master database username. Used only when command=create.
required: false
default: null
password:
description:
- Password for the master database username. Used only when command=create or command=modify.
required: false
default: null
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: true
aliases: [ 'aws_region', 'ec2_region' ]
db_name:
description:
- Name of a database to create within the instance. If not specified then no database is created. Used only when command=create.
required: false
default: null
engine_version:
description:
- Version number of the database engine to use. Used only when command=create. If not specified then the current Amazon RDS default engine version is used.
required: false
default: null
parameter_group:
description:
- Name of the DB parameter group to associate with this instance. If omitted then the RDS default DBParameterGroup will be used. Used only when command=create or command=modify.
required: false
default: null
license_model:
description:
- The license model for this DB instance. Used only when command=create or command=restore.
required: false
default: null
choices: [ 'license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license' ]
multi_zone:
description:
- Specifies if this is a Multi-availability-zone deployment. Can not be used in conjunction with zone parameter. Used only when command=create or command=modify.
choices: [ "yes", "no" ]
required: false
default: null
iops:
description:
- Specifies the number of IOPS for the instance. Used only when command=create or command=modify. Must be an integer greater than 1000.
required: false
default: null
security_groups:
description:
- Comma separated list of one or more security groups. Used only when command=create or command=modify.
required: false
default: null
vpc_security_groups:
description:
- Comma separated list of one or more vpc security group ids. Also requires `subnet` to be specified. Used only when command=create or command=modify.
required: false
default: null
port:
description:
- Port number that the DB instance uses for connections. Used only when command=create or command=replicate.
      - Prior to 2.0 it always defaulted to null and the API would use 3306; it had to be set to other DB default values when not using MySQL.
        Starting at 2.0 it automatically defaults to what is expected for each C(db_engine).
required: false
default: 3306 for mysql, 1521 for Oracle, 1433 for SQL Server, 5432 for PostgreSQL.
upgrade:
description:
- Indicates that minor version upgrades should be applied automatically. Used only when command=create or command=replicate.
required: false
default: no
choices: [ "yes", "no" ]
option_group:
description:
- The name of the option group to use. If not specified then the default option group is used. Used only when command=create.
required: false
default: null
maint_window:
description:
- "Maintenance window in format of ddd:hh24:mi-ddd:hh24:mi. (Example: Mon:22:00-Mon:23:15) If not specified then a random maintenance window is assigned. Used only when command=create or command=modify."
required: false
default: null
backup_window:
description:
- Backup window in format of hh24:mi-hh24:mi. If not specified then a random backup window is assigned. Used only when command=create or command=modify.
required: false
default: null
backup_retention:
description:
- "Number of days backups are retained. Set to 0 to disable backups. Default is 1 day. Valid range: 0-35. Used only when command=create or command=modify."
required: false
default: null
zone:
description:
- availability zone in which to launch the instance. Used only when command=create, command=replicate or command=restore.
required: false
default: null
aliases: ['aws_zone', 'ec2_zone']
subnet:
description:
- VPC subnet group. If specified then a VPC instance is created. Used only when command=create.
required: false
default: null
snapshot:
description:
- Name of snapshot to take. When command=delete, if no snapshot name is provided then no snapshot is taken. If used with command=delete with no instance_name, the snapshot is deleted. Used with command=facts, command=delete or command=snapshot.
required: false
default: null
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
wait:
description:
- When command=create, replicate, modify or restore then wait for the database to enter the 'available' state. When command=delete wait for the database to be terminated.
required: false
default: "no"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 300
apply_immediately:
description:
- Used only when command=modify. If enabled, the modifications will be applied as soon as possible rather than waiting for the next preferred maintenance window.
default: no
choices: [ "yes", "no" ]
force_failover:
description:
- Used only when command=reboot. If enabled, the reboot is done using a MultiAZ failover.
required: false
default: "no"
choices: [ "yes", "no" ]
version_added: "2.0"
new_instance_name:
description:
- Name to rename an instance to. Used only when command=modify.
required: false
default: null
version_added: "1.5"
character_set_name:
description:
- Associate the DB instance with a specified character set. Used with command=create.
required: false
default: null
version_added: "1.9"
publicly_accessible:
description:
- explicitly set whether the resource should be publicly accessible or not. Used with command=create, command=replicate. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
tags:
description:
- tags dict to apply to a resource. Used with command=create, command=replicate, command=restore. Requires boto >= 2.26.0
required: false
default: null
version_added: "1.9"
requirements:
- "python >= 2.6"
- "boto"
author:
- "Bruce Pennypacker (@bpennypacker)"
- "Will Thames (@willthames)"
'''
# FIXME: the command stuff needs a 'state' like alias to make things consistent -- MPD
EXAMPLES = '''
# Basic mysql provisioning example
- rds:
command: create
instance_name: new-database
db_engine: MySQL
size: 10
instance_type: db.m1.small
username: mysql_admin
password: 1nsecure
tags:
Environment: testing
Application: cms
# Create a read-only replica and wait for it to become available
- rds:
command: replicate
instance_name: new-database-replica
source_instance: new_database
wait: yes
wait_timeout: 600
# Delete an instance, but create a snapshot before doing so
- rds:
command: delete
instance_name: new-database
snapshot: new_database_snapshot
# Get facts about an instance
- rds:
command: facts
instance_name: new-database
register: new_database_facts
# Rename an instance and wait for the change to take effect
- rds:
command: modify
instance_name: new-database
new_instance_name: renamed-database
wait: yes
# Reboot an instance and wait for it to become available again
- rds:
command: reboot
instance_name: database
wait: yes
# Restore a Postgres db instance from a snapshot, wait for it to become available again, and
# then modify it to add your security group. Also, display the new endpoint.
# Note that the "publicly_accessible" option is allowed here just as it is in the AWS CLI
- local_action:
module: rds
command: restore
snapshot: mypostgres-snapshot
instance_name: MyNewInstanceName
region: us-west-2
zone: us-west-2b
subnet: default-vpc-xx441xxx
publicly_accessible: yes
wait: yes
wait_timeout: 600
tags:
Name: pg1_test_name_tag
register: rds
- local_action:
module: rds
command: modify
instance_name: MyNewInstanceName
region: us-west-2
vpc_security_groups: sg-xxx945xx
- debug: msg="The new db endpoint is {{ rds.instance.endpoint }}"
'''
import sys
import time
try:
import boto.rds
HAS_BOTO = True
except ImportError:
HAS_BOTO = False
try:
import boto.rds2
has_rds2 = True
except ImportError:
has_rds2 = False
DEFAULT_PORTS= {
'mysql': 3306,
'oracle': 1521,
'sqlserver': 1433,
'postgres': 5432,
}
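# Illustrative sketch (assumption, not taken from this module's later logic):
# one way a default port could be derived from DEFAULT_PORTS, keying on the
# engine family before any '-' suffix.
#
#   engine = 'oracle-se1'
#   port = DEFAULT_PORTS[engine.split('-')[0].lower()]   # -> 1521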
class RDSException(Exception):
def __init__(self, exc):
if hasattr(exc, 'error_message') and exc.error_message:
self.message = exc.error_message
self.code = exc.error_code
elif hasattr(exc, 'body') and 'Error' in exc.body:
self.message = exc.body['Error']['Message']
self.code = exc.body['Error']['Code']
else:
self.message = str(exc)
self.code = 'Unknown Error'
class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
except boto.exception.BotoServerError, e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
except boto.exception.BotoServerError, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
params['engine'] = db_engine
try:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
            result = self.connection.create_dbinstance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
except boto.exception.BotoServerError, e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
except boto.rds2.exceptions.DBInstanceNotFound, e:
return None
except Exception, e:
raise e
def get_db_snapshot(self, snapshotid):
try:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
except boto.rds2.exceptions.DBSnapshotNotFound, e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
username, password, **params):
try:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
except boto.exception.BotoServerError, e:
raise RDSException(e)
class RDSDBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
self.name = dbinstance.id
self.status = dbinstance.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.instance.create_time,
'status' : self.status,
'availability_zone' : self.instance.availability_zone,
'backup_retention' : self.instance.backup_retention_period,
'backup_window' : self.instance.preferred_backup_window,
'maintenance_window' : self.instance.preferred_maintenance_window,
'multi_zone' : self.instance.multi_az,
'instance_type' : self.instance.instance_class,
'username' : self.instance.master_username,
'iops' : self.instance.iops
}
# Endpoint exists only if the instance is available
if self.status == 'available':
d["endpoint"] = self.instance.endpoint[0]
d["port"] = self.instance.endpoint[1]
if self.instance.vpc_security_groups is not None:
d["vpc_security_groups"] = ','.join(x.vpc_group for x in self.instance.vpc_security_groups)
else:
d["vpc_security_groups"] = None
else:
d["endpoint"] = None
d["port"] = None
d["vpc_security_groups"] = None
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
except Exception, e:
d["replication_source"] = None
return d
class RDS2DBInstance:
def __init__(self, dbinstance):
self.instance = dbinstance
if 'DBInstanceIdentifier' not in dbinstance:
self.name = None
else:
self.name = self.instance.get('DBInstanceIdentifier')
self.status = self.instance.get('DBInstanceStatus')
def get_data(self):
d = {
'id': self.name,
'create_time': self.instance['InstanceCreateTime'],
'status': self.status,
'availability_zone': self.instance['AvailabilityZone'],
'backup_retention': self.instance['BackupRetentionPeriod'],
'maintenance_window': self.instance['PreferredMaintenanceWindow'],
'multi_zone': self.instance['MultiAZ'],
'instance_type': self.instance['DBInstanceClass'],
'username': self.instance['MasterUsername'],
'iops': self.instance['Iops'],
'replication_source': self.instance['ReadReplicaSourceDBInstanceIdentifier']
}
if self.instance["VpcSecurityGroups"] is not None:
d['vpc_security_groups'] = ','.join(x['VpcSecurityGroupId'] for x in self.instance['VpcSecurityGroups'])
if self.status == 'available':
d['endpoint'] = self.instance["Endpoint"]["Address"]
d['port'] = self.instance["Endpoint"]["Port"]
else:
d['endpoint'] = None
d['port'] = None
return d
class RDSSnapshot:
def __init__(self, snapshot):
self.snapshot = snapshot
self.name = snapshot.id
self.status = snapshot.status
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot.snapshot_create_time,
'status' : self.status,
'availability_zone' : self.snapshot.availability_zone,
'instance_id' : self.snapshot.instance_id,
'instance_created' : self.snapshot.instance_create_time,
}
# needs boto >= 2.21.0
if hasattr(self.snapshot, 'snapshot_type'):
d["snapshot_type"] = self.snapshot.snapshot_type
if hasattr(self.snapshot, 'iops'):
d["iops"] = self.snapshot.iops
return d
class RDS2Snapshot:
def __init__(self, snapshot):
if 'DeleteDBSnapshotResponse' in snapshot:
self.snapshot = snapshot['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
else:
self.snapshot = snapshot
self.name = self.snapshot.get('DBSnapshotIdentifier')
self.status = self.snapshot.get('Status')
def get_data(self):
d = {
'id' : self.name,
'create_time' : self.snapshot['SnapshotCreateTime'],
'status' : self.status,
'availability_zone' : self.snapshot['AvailabilityZone'],
'instance_id' : self.snapshot['DBInstanceIdentifier'],
'instance_created' : self.snapshot['InstanceCreateTime'],
'snapshot_type' : self.snapshot['SnapshotType'],
'iops' : self.snapshot['Iops'],
}
return d
def await_resource(conn, resource, status, module):
wait_timeout = module.params.get('wait_timeout') + time.time()
while wait_timeout > time.time() and resource.status != status:
time.sleep(5)
if wait_timeout <= time.time():
module.fail_json(msg="Timeout waiting for RDS resource %s" % resource.name)
if module.params.get('command') == 'snapshot':
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS snapshot %s" % resource.snapshot)
resource = conn.get_db_snapshot(resource.name)
else:
# Temporary until all the rds2 commands have their responses parsed
if resource.name is None:
module.fail_json(msg="There was a problem waiting for RDS instance %s" % resource.instance)
resource = conn.get_db_instance(resource.name)
if resource is None:
break
return resource
def create_db_instance(module, conn):
subnet = module.params.get('subnet')
required_vars = ['instance_name', 'db_engine', 'size', 'instance_type', 'username', 'password']
valid_vars = ['backup_retention', 'backup_window',
'character_set_name', 'db_name', 'engine_version',
'instance_type', 'iops', 'license_model', 'maint_window',
'multi_zone', 'option_group', 'parameter_group','port',
'subnet', 'upgrade', 'zone']
if module.params.get('subnet'):
valid_vars.append('vpc_security_groups')
else:
valid_vars.append('security_groups')
if has_rds2:
valid_vars.extend(['publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance(instance_name, module.params.get('size'),
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
        except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def replicate_db_instance(module, conn):
required_vars = ['instance_name', 'source_instance']
valid_vars = ['instance_type', 'port', 'upgrade', 'zone']
if has_rds2:
valid_vars.extend(['iops', 'option_group', 'publicly_accessible', 'tags'])
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
source_instance = module.params.get('source_instance')
result = conn.get_db_instance(instance_name)
if result:
changed = False
else:
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
        except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def delete_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot', 'skip_final_snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if not instance_name:
result = conn.get_db_snapshot(snapshot)
else:
result = conn.get_db_instance(instance_name)
if not result:
module.exit_json(changed=False)
if result.status == 'deleting':
module.exit_json(changed=False)
try:
if instance_name:
if snapshot:
params["skip_final_snapshot"] = False
if has_rds2:
params["final_db_snapshot_identifier"] = snapshot
else:
params["final_snapshot_id"] = snapshot
else:
params["skip_final_snapshot"] = True
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
    except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
# so just return
if not module.params.get('wait'):
module.exit_json(changed=True)
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
    except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
    except Exception as e:
module.fail_json(msg=str(e))
def facts_db_instance_or_snapshot(module, conn):
required_vars = []
valid_vars = ['instance_name', 'snapshot']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
if instance_name and snapshot:
module.fail_json(msg="Facts must be called with either instance_name or snapshot, not both")
if instance_name:
resource = conn.get_db_instance(instance_name)
if not resource:
module.fail_json(msg="DB instance %s does not exist" % instance_name)
if snapshot:
resource = conn.get_db_snapshot(snapshot)
if not resource:
module.fail_json(msg="DB snapshot %s does not exist" % snapshot)
module.exit_json(changed=False, instance=resource.get_data())
def modify_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['apply_immediately', 'backup_retention', 'backup_window',
'db_name', 'engine_version', 'instance_type', 'iops', 'license_model',
'maint_window', 'multi_zone', 'new_instance_name',
'option_group', 'parameter_group', 'password', 'size', 'upgrade2', 'upgrade' ]
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
new_instance_name = module.params.get('new_instance_name')
try:
result = conn.modify_db_instance(instance_name, **params)
    except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
# Wait until the new instance name is valid
new_instance = None
while not new_instance:
new_instance = conn.get_db_instance(new_instance_name)
time.sleep(5)
# Found instance but it briefly flicks to available
# before rebooting so let's wait until we see it rebooting
# before we check whether to 'wait'
result = await_resource(conn, new_instance, 'rebooting', module)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
# guess that this changed the DB, need a way to check
module.exit_json(changed=True, instance=resource.get_data())
def promote_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = ['backup_retention', 'backup_window']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
if not result:
module.fail_json(msg="DB Instance %s does not exist" % instance_name)
if result.get_data().get('replication_source'):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
        except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def snapshot_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['tags']
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_snapshot(snapshot)
if not result:
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
        except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_snapshot(snapshot)
module.exit_json(changed=changed, snapshot=resource.get_data())
def reboot_db_instance(module, conn):
required_vars = ['instance_name']
valid_vars = []
if has_rds2:
valid_vars.append('force_failover')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
result = conn.get_db_instance(instance_name)
changed = False
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
    except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def restore_db_instance(module, conn):
required_vars = ['instance_name', 'snapshot']
valid_vars = ['db_name', 'iops', 'license_model', 'multi_zone',
'option_group', 'port', 'publicly_accessible',
'subnet', 'tags', 'upgrade', 'zone']
if has_rds2:
valid_vars.append('instance_type')
else:
required_vars.append('instance_type')
params = validate_parameters(required_vars, valid_vars, module)
instance_name = module.params.get('instance_name')
instance_type = module.params.get('instance_type')
snapshot = module.params.get('snapshot')
changed = False
result = conn.get_db_instance(instance_name)
if not result:
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
        except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
resource = await_resource(conn, result, 'available', module)
else:
resource = conn.get_db_instance(instance_name)
module.exit_json(changed=changed, instance=resource.get_data())
def validate_parameters(required_vars, valid_vars, module):
command = module.params.get('command')
for v in required_vars:
if not module.params.get(v):
module.fail_json(msg="Parameter %s required for %s command" % (v, command))
# map to convert rds module options to boto rds and rds2 options
optional_params = {
'port': 'port',
'db_name': 'db_name',
'zone': 'availability_zone',
'maint_window': 'preferred_maintenance_window',
'backup_window': 'preferred_backup_window',
'backup_retention': 'backup_retention_period',
'multi_zone': 'multi_az',
'engine_version': 'engine_version',
'upgrade': 'auto_minor_version_upgrade',
'upgrade2': 'allow_major_version_upgrade',
'subnet': 'db_subnet_group_name',
'license_model': 'license_model',
'option_group': 'option_group_name',
'size': 'allocated_storage',
'iops': 'iops',
'new_instance_name': 'new_instance_id',
'apply_immediately': 'apply_immediately',
}
# map to convert rds module options to boto rds options
optional_params_rds = {
'db_engine': 'engine',
'password': 'master_password',
'parameter_group': 'param_group',
'instance_type': 'instance_class',
}
# map to convert rds module options to boto rds2 options
optional_params_rds2 = {
'tags': 'tags',
'publicly_accessible': 'publicly_accessible',
'parameter_group': 'db_parameter_group_name',
'character_set_name': 'character_set_name',
'instance_type': 'db_instance_class',
'password': 'master_user_password',
'new_instance_name': 'new_db_instance_identifier',
'force_failover': 'force_failover',
}
if has_rds2:
optional_params.update(optional_params_rds2)
sec_group = 'db_security_groups'
else:
optional_params.update(optional_params_rds)
sec_group = 'security_groups'
        # Check for options only supported with rds2
        for k in set(optional_params_rds2.keys()) - set(optional_params_rds.keys()):
            if module.params.get(k):
                module.fail_json(msg="Parameter %s requires boto.rds2 (boto >= 2.26.0)" % k)
params = {}
for (k, v) in optional_params.items():
if module.params.get(k) and k not in required_vars:
if k in valid_vars:
params[v] = module.params[k]
else:
module.fail_json(msg="Parameter %s is not valid for %s command" % (k, command))
if module.params.get('security_groups'):
params[sec_group] = module.params.get('security_groups').split(',')
vpc_groups = module.params.get('vpc_security_groups')
if vpc_groups:
if has_rds2:
params['vpc_security_group_ids'] = vpc_groups
else:
groups_list = []
for x in vpc_groups:
groups_list.append(boto.rds.VPCSecurityGroupMembership(vpc_group=x))
params['vpc_security_groups'] = groups_list
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
params['tags'] = module.params['tags'].items()
return params
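# Illustrative mapping sketch (not part of the module; the literal values are
# made up): with command 'create' and module params such as
#   {'size': '10', 'instance_type': 'db.m1.small', 'multi_zone': True}
# validate_parameters() returns boto keyword arguments roughly like
#   {'allocated_storage': '10', 'db_instance_class': 'db.m1.small', 'multi_az': True}
# when boto.rds2 is available, or
#   {'allocated_storage': '10', 'instance_class': 'db.m1.small', 'multi_az': True}
# with the older boto.rds naming.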
def main():
argument_spec = ec2_argument_spec()
argument_spec.update(dict(
command = dict(choices=['create', 'replicate', 'delete', 'facts', 'modify', 'promote', 'snapshot', 'reboot', 'restore'], required=True),
instance_name = dict(required=False),
source_instance = dict(required=False),
db_engine = dict(choices=['MySQL', 'oracle-se1', 'oracle-se', 'oracle-ee', 'sqlserver-ee', 'sqlserver-se', 'sqlserver-ex', 'sqlserver-web', 'postgres'], required=False),
size = dict(required=False),
instance_type = dict(aliases=['type'], required=False),
username = dict(required=False),
password = dict(no_log=True, required=False),
db_name = dict(required=False),
engine_version = dict(required=False),
parameter_group = dict(required=False),
license_model = dict(choices=['license-included', 'bring-your-own-license', 'general-public-license', 'postgresql-license'], required=False),
multi_zone = dict(type='bool', default=False),
iops = dict(required=False),
security_groups = dict(required=False),
vpc_security_groups = dict(type='list', required=False),
port = dict(required=False),
upgrade = dict(type='bool', default=False),
upgrade2 = dict(type='bool', default=False),
option_group = dict(required=False),
maint_window = dict(required=False),
backup_window = dict(required=False),
backup_retention = dict(required=False),
zone = dict(aliases=['aws_zone', 'ec2_zone'], required=False),
subnet = dict(required=False),
wait = dict(type='bool', default=False),
wait_timeout = dict(type='int', default=300),
snapshot = dict(required=False),
apply_immediately = dict(type='bool', default=False),
new_instance_name = dict(required=False),
tags = dict(type='dict', required=False),
publicly_accessible = dict(required=False),
character_set_name = dict(required=False),
force_failover = dict(type='bool', required=False, default=False)
)
)
module = AnsibleModule(
argument_spec=argument_spec,
)
if not HAS_BOTO:
module.fail_json(msg='boto required for this module')
invocations = {
'create': create_db_instance,
'replicate': replicate_db_instance,
'delete': delete_db_instance_or_snapshot,
'facts': facts_db_instance_or_snapshot,
'modify': modify_db_instance,
'promote': promote_db_instance,
'snapshot': snapshot_db_instance,
'reboot': reboot_db_instance,
'restore': restore_db_instance,
}
region, ec2_url, aws_connect_params = get_aws_connection_info(module)
if not region:
module.fail_json(msg="Region not specified. Unable to determine region from EC2_REGION.")
# set port to per db defaults if not specified
if module.params['port'] is None and module.params['db_engine'] is not None and module.params['command'] == 'create':
if '-' in module.params['db_engine']:
engine = module.params['db_engine'].split('-')[0]
else:
engine = module.params['db_engine']
module.params['port'] = DEFAULT_PORTS[engine.lower()]
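    # Illustrative example (hypothetical values): a create with db_engine
    # 'oracle-se1' is reduced to 'oracle' above, so the default port comes
    # from DEFAULT_PORTS['oracle'].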
# connect to the rds endpoint
if has_rds2:
conn = RDS2Connection(module, region, **aws_connect_params)
else:
conn = RDSConnection(module, region, **aws_connect_params)
invocations[module.params.get('command')](module, conn)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *
main()
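# Hypothetical playbook usage (illustrative only; the module name, parameters
# and values below are assumptions, not taken from this file):
#
#   - rds:
#       command: create
#       instance_name: example-db
#       db_engine: MySQL
#       size: 10
#       instance_type: db.m1.small
#       username: admin
#       password: secret
#       wait: yes
#       wait_timeout: 600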
| gpl-3.0 | 3,260,466,123,463,195,600 | 39.087035 | 322 | 0.635811 | false | 4.014891 | false | false | false |
jnimmo/pyenvisalink | pyenvisalink/honeywell_client.py | 1 | 10341 | import logging
import json
import re
import asyncio
from pyenvisalink import EnvisalinkClient
from pyenvisalink.honeywell_envisalinkdefs import *
_LOGGER = logging.getLogger(__name__)
class HoneywellClient(EnvisalinkClient):
"""Represents a honeywell alarm client."""
@asyncio.coroutine
def keep_alive(self):
"""Send a keepalive command to reset it's watchdog timer."""
while not self._shutdown:
if self._loggedin:
self.send_command(evl_Commands['KeepAlive'], '')
yield from asyncio.sleep(self._alarmPanel.keepalive_interval, loop=self._eventLoop)
@asyncio.coroutine
def periodic_zone_timer_dump(self):
"""Used to periodically get the zone timers to make sure our zones are updated."""
while not self._shutdown:
if self._loggedin:
self.dump_zone_timers()
yield from asyncio.sleep(self._alarmPanel.zone_timer_interval, loop=self._eventLoop)
def send_command(self, code, data):
"""Send a command in the proper honeywell format."""
to_send = '^' + code + ',' + data + '$'
self.send_data(to_send)
def dump_zone_timers(self):
"""Send a command to dump out the zone timers."""
self.send_command(evl_Commands['DumpZoneTimers'], '')
def keypresses_to_partition(self, partitionNumber, keypresses):
"""Send keypresses to a particular partition."""
for char in keypresses:
self.send_command(evl_Commands['PartitionKeypress'], str.format("{0},{1}", partitionNumber, char))
def arm_stay_partition(self, code, partitionNumber):
"""Public method to arm/stay a partition."""
self.keypresses_to_partition(partitionNumber, code + '3')
def arm_away_partition(self, code, partitionNumber):
"""Public method to arm/away a partition."""
self.keypresses_to_partition(partitionNumber, code + '2')
def arm_max_partition(self, code, partitionNumber):
"""Public method to arm/max a partition."""
self.keypresses_to_partition(partitionNumber, code + '4')
def disarm_partition(self, code, partitionNumber):
"""Public method to disarm a partition."""
self.keypresses_to_partition(partitionNumber, code + '1')
def panic_alarm(self, panicType):
"""Public method to raise a panic alarm."""
self.keypresses_to_partition(1, evl_PanicTypes[panicType])
def parseHandler(self, rawInput):
"""When the envisalink contacts us- parse out which command and data."""
cmd = {}
parse = re.match('([%\^].+)\$', rawInput)
if parse and parse.group(1):
# keep first sentinel char to tell difference between tpi and
# Envisalink command responses. Drop the trailing $ sentinel.
inputList = parse.group(1).split(',')
code = inputList[0]
cmd['code'] = code
cmd['data'] = ','.join(inputList[1:])
elif not self._loggedin:
# assume it is login info
code = rawInput
cmd['code'] = code
cmd['data'] = ''
        else:
            _LOGGER.error("Unrecognized data received from the envisalink. Ignoring.")
            return cmd

        _LOGGER.debug(str.format("Code:{0} Data:{1}", code, cmd['data']))
try:
cmd['handler'] = "handle_%s" % evl_ResponseTypes[code]['handler']
cmd['callback'] = "callback_%s" % evl_ResponseTypes[code]['handler']
except KeyError:
_LOGGER.warning(str.format('No handler defined in config for {0}, skipping...', code))
return cmd
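    # Illustrative example (hypothetical traffic): a raw line such as
    # '%00,01,1C08,08,00,****DISARMED****  Ready to Arm  $' would come back as
    # cmd = {'code': '%00', 'data': '01,1C08,...', 'handler': 'handle_keypad_update',
    # 'callback': 'callback_keypad_update'}, assuming evl_ResponseTypes maps
    # '%00' to the keypad update handler.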
def handle_login(self, code, data):
"""When the envisalink asks us for our password- send it."""
self.send_data(self._alarmPanel.password)
def handle_command_response(self, code, data):
"""Handle the envisalink's initial response to our commands."""
responseString = evl_TPI_Response_Codes[data]
_LOGGER.debug("Envisalink response: " + responseString)
if data != '00':
logging.error("error sending command to envisalink. Response was: " + responseString)
def handle_poll_response(self, code, data):
"""Handle the response to our keepalive messages."""
self.handle_command_response(code, data)
def handle_keypad_update(self, code, data):
"""Handle the response to when the envisalink sends keypad updates our way."""
dataList = data.split(',')
        # make sure data is in format we expect, current TPI seems to send bad data every so often
#TODO: Make this a regex...
if len(dataList) != 5 or "%" in data:
_LOGGER.error("Data format invalid from Envisalink, ignoring...")
return
partitionNumber = int(dataList[0])
flags = IconLED_Flags()
flags.asShort = int(dataList[1], 16)
beep = evl_Virtual_Keypad_How_To_Beep.get(dataList[3], 'unknown')
alpha = dataList[4]
_LOGGER.debug("Updating our local alarm state...")
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'alarm': bool(flags.alarm), 'alarm_in_memory': bool(flags.alarm_in_memory), 'armed_away': bool(flags.armed_away),
'ac_present': bool(flags.ac_present), 'armed_bypass': bool(flags.bypass), 'chime': bool(flags.chime),
'armed_zero_entry_delay': bool(flags.armed_zero_entry_delay), 'alarm_fire_zone': bool(flags.alarm_fire_zone),
'trouble': bool(flags.system_trouble), 'ready': bool(flags.ready), 'fire': bool(flags.fire),
'armed_stay': bool(flags.armed_stay),
'alpha': alpha,
'beep': beep,
})
_LOGGER.debug(json.dumps(self._alarmPanel.alarm_state['partition'][partitionNumber]['status']))
def handle_zone_state_change(self, code, data):
"""Handle when the envisalink sends us a zone change."""
# Envisalink TPI is inconsistent at generating these
bigEndianHexString = ''
# every four characters
inputItems = re.findall('....', data)
for inputItem in inputItems:
# Swap the couples of every four bytes
# (little endian to big endian)
swapedBytes = []
swapedBytes.insert(0, inputItem[0:2])
swapedBytes.insert(0, inputItem[2:4])
# add swapped set of four bytes to our return items,
# converting from hex to int
bigEndianHexString += ''.join(swapedBytes)
# convert hex string to 64 bit bitstring TODO: THIS IS 128 for evl4
if self._alarmPanel.envisalink_version < 4:
bitfieldString = str(bin(int(bigEndianHexString, 16))[2:].zfill(64))
else:
bitfieldString = str(bin(int(bigEndianHexString, 16))[2:].zfill(128))
# reverse every 16 bits so "lowest" zone is on the left
zonefieldString = ''
inputItems = re.findall('.' * 16, bitfieldString)
for inputItem in inputItems:
zonefieldString += inputItem[::-1]
for zoneNumber, zoneBit in enumerate(zonefieldString, start=1):
self._alarmPanel.alarm_state['zone'][zoneNumber]['status'].update({'open': zoneBit == '1', 'fault': zoneBit == '1'})
if zoneBit == '1':
self._alarmPanel.alarm_state['zone'][zoneNumber]['last_fault'] = 0
_LOGGER.debug("(zone %i) is %s", zoneNumber, "Open/Faulted" if zoneBit == '1' else "Closed/Not Faulted")
def handle_partition_state_change(self, code, data):
"""Handle when the envisalink sends us a partition change."""
for currentIndex in range(0, 8):
partitionStateCode = data[currentIndex * 2:(currentIndex * 2) + 2]
partitionState = evl_Partition_Status_Codes[str(partitionStateCode)]
partitionNumber = currentIndex + 1
previouslyArmed = self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].get('armed', False)
armed = partitionState['name'] in ('ARMED_STAY', 'ARMED_AWAY', 'ARMED_MAX')
self._alarmPanel.alarm_state.update({'arm': not armed, 'disarm': armed, 'cancel': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY')})
self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'exit_delay': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY' and not previouslyArmed),
'entry_delay': bool(partitionState['name'] == 'EXIT_ENTRY_DELAY' and previouslyArmed),
'armed': armed,
'ready': bool(partitionState['name'] == 'READY' or partitionState['name'] == 'READY_BYPASS')})
if partitionState['name'] == 'NOT_READY': self._alarmPanel.alarm_state['partition'][partitionNumber]['status'].update({'ready': False})
            _LOGGER.debug('Partition ' + str(partitionNumber) + ' is in state ' + partitionState['name'])
_LOGGER.debug(json.dumps(self._alarmPanel.alarm_state['partition'][partitionNumber]['status']))
def handle_realtime_cid_event(self, code, data):
"""Handle when the envisalink sends us an alarm arm/disarm/trigger."""
eventTypeInt = int(data[0])
eventType = evl_CID_Qualifiers[eventTypeInt]
cidEventInt = int(data[1:4])
cidEvent = evl_CID_Events[cidEventInt]
partition = data[4:6]
zoneOrUser = int(data[6:9])
_LOGGER.debug('Event Type is ' + eventType)
_LOGGER.debug('CID Type is ' + cidEvent['type'])
_LOGGER.debug('CID Description is ' + cidEvent['label'])
_LOGGER.debug('Partition is ' + partition)
_LOGGER.debug(cidEvent['type'] + ' value is ' + str(zoneOrUser))
return cidEvent
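    # Illustrative example (hypothetical payload): data '140101005' is sliced
    # into qualifier 1, CID code 401, partition '01' and zone/user 5, assuming
    # evl_CID_Qualifiers[1] and evl_CID_Events[401] carry the usual contact-ID
    # "open/close by user" meanings.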
| mit | -3,805,397,457,246,989,300 | 50.447761 | 198 | 0.582922 | false | 4.087352 | false | false | false |
caladrel/trueskill_kicker | league/migrations/0001_initial.py | 1 | 3950 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.core.validators
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Match',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('score_team1', models.PositiveSmallIntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], validators=[django.core.validators.MaxValueValidator(10)])),
('score_team2', models.PositiveSmallIntegerField(choices=[(0, 0), (1, 1), (2, 2), (3, 3), (4, 4), (5, 5), (6, 6), (7, 7), (8, 8), (9, 9), (10, 10)], validators=[django.core.validators.MaxValueValidator(10)])),
('timestamp', models.DateTimeField(auto_now_add=True, db_index=True)),
],
options={
'verbose_name_plural': 'matches',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Player',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200, db_index=True)),
('mu', models.FloatField(default=25.0)),
('sigma', models.FloatField(default=8.333333333333334)),
('rank', models.FloatField(default=0.0, db_index=True)),
('attacker_mu', models.FloatField(default=25.0)),
('attacker_sigma', models.FloatField(default=8.333333333333334)),
('attacker_rank', models.FloatField(default=0.0, db_index=True)),
('defender_mu', models.FloatField(default=25.0)),
('defender_sigma', models.FloatField(default=8.333333333333334)),
('defender_rank', models.FloatField(default=0.0, db_index=True)),
],
options={
'ordering': ['name'],
},
bases=(models.Model,),
),
migrations.CreateModel(
name='PlayerHistory',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('mu', models.FloatField(default=25.0)),
('sigma', models.FloatField(default=8.333333333333334)),
('rank', models.FloatField(default=0.0)),
('was_attacker', models.BooleanField(default=False)),
('seperate_mu', models.FloatField(default=25.0)),
('seperate_sigma', models.FloatField(default=8.333333333333334)),
('seperate_rank', models.FloatField(default=0.0)),
('match', models.ForeignKey(to='league.Match')),
('player', models.ForeignKey(to='league.Player')),
],
options={
},
bases=(models.Model,),
),
migrations.AddField(
model_name='match',
name='team1_player1',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team1_player2',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team2_player1',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
migrations.AddField(
model_name='match',
name='team2_player2',
field=models.ForeignKey(related_name='+', to='league.Player'),
preserve_default=True,
),
]
| apache-2.0 | 6,388,180,689,287,207,000 | 43.382022 | 225 | 0.533418 | false | 4.084798 | false | false | false |
zcbenz/cefode-chromium | tools/telemetry/telemetry/core/chrome/browser_backend.py | 1 | 7145 | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import urllib2
import httplib
import socket
import json
import re
import sys
from telemetry.core import util
from telemetry.core import exceptions
from telemetry.core import user_agent
from telemetry.core import wpr_modes
from telemetry.core import wpr_server
from telemetry.core.chrome import extension_dict_backend
from telemetry.core.chrome import tab_list_backend
from telemetry.core.chrome import tracing_backend
from telemetry.test import options_for_unittests
class ExtensionsNotSupportedException(Exception):
pass
class BrowserBackend(object):
"""A base class for browser backends. Provides basic functionality
once a remote-debugger port has been established."""
WEBPAGEREPLAY_HOST = '127.0.0.1'
def __init__(self, is_content_shell, supports_extensions, options):
self.browser_type = options.browser_type
self.is_content_shell = is_content_shell
self._supports_extensions = supports_extensions
self.options = options
self._browser = None
self._port = None
self._inspector_protocol_version = 0
self._chrome_branch_number = 0
self._webkit_base_revision = 0
self._tracing_backend = None
self.webpagereplay_local_http_port = util.GetAvailableLocalPort()
self.webpagereplay_local_https_port = util.GetAvailableLocalPort()
self.webpagereplay_remote_http_port = self.webpagereplay_local_http_port
self.webpagereplay_remote_https_port = self.webpagereplay_local_https_port
if options.dont_override_profile and not options_for_unittests.AreSet():
sys.stderr.write('Warning: Not overriding profile. This can cause '
'unexpected effects due to profile-specific settings, '
'such as about:flags settings, cookies, and '
'extensions.\n')
self._tab_list_backend = tab_list_backend.TabListBackend(self)
self._extension_dict_backend = None
if supports_extensions:
self._extension_dict_backend = \
extension_dict_backend.ExtensionDictBackend(self)
def SetBrowser(self, browser):
self._browser = browser
self._tab_list_backend.Init()
@property
def browser(self):
return self._browser
@property
def supports_extensions(self):
"""True if this browser backend supports extensions."""
return self._supports_extensions
@property
def tab_list_backend(self):
return self._tab_list_backend
@property
def extension_dict_backend(self):
return self._extension_dict_backend
def GetBrowserStartupArgs(self):
args = []
args.extend(self.options.extra_browser_args)
args.append('--disable-background-networking')
args.append('--metrics-recording-only')
args.append('--no-first-run')
if self.options.wpr_mode != wpr_modes.WPR_OFF:
args.extend(wpr_server.GetChromeFlags(
self.WEBPAGEREPLAY_HOST,
self.webpagereplay_remote_http_port,
self.webpagereplay_remote_https_port))
args.extend(user_agent.GetChromeUserAgentArgumentFromType(
self.options.browser_user_agent_type))
extensions = [extension.path for extension in
self.options.extensions_to_load if not extension.is_component]
extension_str = ','.join(extensions)
if len(extensions) > 0:
args.append('--load-extension=%s' % extension_str)
component_extensions = [extension.path for extension in
self.options.extensions_to_load if extension.is_component]
component_extension_str = ','.join(component_extensions)
if len(component_extensions) > 0:
args.append('--load-component-extension=%s' % component_extension_str)
return args
@property
def wpr_mode(self):
return self.options.wpr_mode
def _WaitForBrowserToComeUp(self, timeout=None):
def IsBrowserUp():
try:
self.Request('', timeout=timeout)
except (socket.error, httplib.BadStatusLine, urllib2.URLError):
return False
else:
return True
try:
util.WaitFor(IsBrowserUp, timeout=30)
except util.TimeoutException:
raise exceptions.BrowserGoneException()
def AllExtensionsLoaded():
for e in self.options.extensions_to_load:
if not e.extension_id in self._extension_dict_backend:
return False
extension_object = self._extension_dict_backend[e.extension_id]
extension_object.WaitForDocumentReadyStateToBeInteractiveOrBetter()
return True
if self._supports_extensions:
util.WaitFor(AllExtensionsLoaded, timeout=30)
def _PostBrowserStartupInitialization(self):
# Detect version information.
data = self.Request('version')
resp = json.loads(data)
if 'Protocol-Version' in resp:
self._inspector_protocol_version = resp['Protocol-Version']
if 'Browser' in resp:
branch_number_match = re.search('Chrome/\d+\.\d+\.(\d+)\.\d+',
resp['Browser'])
else:
branch_number_match = re.search(
'Chrome/\d+\.\d+\.(\d+)\.\d+ (Mobile )?Safari',
resp['User-Agent'])
webkit_version_match = re.search('\((trunk)?\@(\d+)\)',
resp['WebKit-Version'])
if branch_number_match:
self._chrome_branch_number = int(branch_number_match.group(1))
else:
# Content Shell returns '' for Browser, for now we have to
# fall-back and assume branch 1025.
self._chrome_branch_number = 1025
if webkit_version_match:
self._webkit_base_revision = int(webkit_version_match.group(2))
return
# Detection has failed: assume 18.0.1025.168 ~= Chrome Android.
self._inspector_protocol_version = 1.0
self._chrome_branch_number = 1025
self._webkit_base_revision = 106313
def Request(self, path, timeout=None):
url = 'http://localhost:%i/json' % self._port
if path:
url += '/' + path
req = urllib2.urlopen(url, timeout=timeout)
return req.read()
@property
def chrome_branch_number(self):
return self._chrome_branch_number
@property
def supports_tab_control(self):
return self._chrome_branch_number >= 1303
@property
def supports_tracing(self):
return self.is_content_shell or self._chrome_branch_number >= 1385
def StartTracing(self):
if self._tracing_backend is None:
self._tracing_backend = tracing_backend.TracingBackend(self._port)
self._tracing_backend.BeginTracing()
def StopTracing(self):
self._tracing_backend.EndTracing()
def GetTraceResultAndReset(self):
return self._tracing_backend.GetTraceResultAndReset()
def GetRemotePort(self, _):
return util.GetAvailableLocalPort()
def Close(self):
if self._tracing_backend:
self._tracing_backend.Close()
self._tracing_backend = None
def CreateForwarder(self, *port_pairs):
raise NotImplementedError()
def IsBrowserRunning(self):
raise NotImplementedError()
def GetStandardOutput(self):
raise NotImplementedError()
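# A minimal sketch (an assumption for illustration, not part of Telemetry) of
# what a concrete backend adds on top of BrowserBackend: the last three
# methods are the ones the base class leaves unimplemented, and 'options' is
# assumed to be a regular Telemetry browser-options object.
class _ExampleBrowserBackend(BrowserBackend):
  def __init__(self, options):
    super(_ExampleBrowserBackend, self).__init__(
        is_content_shell=False, supports_extensions=True, options=options)
    self._port = 9222  # a real backend would pick its remote-debugging port

  def CreateForwarder(self, *port_pairs):
    raise NotImplementedError()  # would map device ports to host ports

  def IsBrowserRunning(self):
    return False  # a real backend would poll its browser process

  def GetStandardOutput(self):
    return ''  # a real backend would return the captured browser output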
| bsd-3-clause | -2,395,234,556,662,840,000 | 32.70283 | 80 | 0.684955 | false | 3.965039 | false | false | false |
KazDragon/munin | conanfile.py | 1 | 2093 | from conans import ConanFile, CMake, tools
class MuninConan(ConanFile):
name = "munin"
license = "MIT"
author = "KazDragon"
url = "https://github.com/KazDragon/munin"
description = "A text-based gui component library build on Terminal++"
topics = ("ansi-escape-codes", "text-ui")
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "coverage": [True, False], "sanitize" : ["off", "address"]}
default_options = {"shared": False, "coverage": False, "sanitize": "off"}
exports = "*.hpp", "*.in", "*.cpp", "CMakeLists.txt", "*.md", "LICENSE"
requires = ("terminalpp/[>=2.0.1]@kazdragon/conan-public",
"nlohmann_json/[>=3.3.0]",
"boost/[>=1.69]")
build_requires = ("gtest/[>=1.8.1]")
generators = "cmake"
def imports(self):
# If Munin is built as shared, then running the tests will
# rely on the shared object for terminalpp being available
# in the same directory.
self.copy("*.so*", dst="", src="", keep_path=False, root_package="terminalpp")
def configure(self):
self.options["terminalpp"].shared = self.options.shared
def build(self):
cmake = CMake(self)
cmake.definitions["BUILD_SHARED_LIBS"] = self.options.shared
cmake.definitions["MUNIN_COVERAGE"] = self.options.coverage
cmake.definitions["MUNIN_SANITIZE"] = self.options.sanitize
cmake.configure()
cmake.build()
def package(self):
self.copy("*.hpp", dst="include", src="include")
self.copy("*.lib", dst="lib", keep_path=False)
self.copy("*.dll", dst="bin", keep_path=False)
self.copy("*.so", dst="lib", keep_path=False)
self.copy("*.so.*", dst="lib", keep_path=False)
self.copy("*.dylib", dst="lib", keep_path=False)
self.copy("*.a", dst="lib", keep_path=False)
def package_info(self):
if self.settings.build_type == "Debug":
self.cpp_info.libs = ["munind"]
else:
self.cpp_info.libs = ["munin"]
| mit | 1,457,283,711,952,135,400 | 39.25 | 99 | 0.584329 | false | 3.425532 | false | false | false |
Zanzibar82/streamondemand.test | channels/cinestreaming01.py | 1 | 4463 | # -*- coding: utf-8 -*-
#------------------------------------------------------------
# streamondemand.- XBMC Plugin
# Canal para cinestreaming01.com
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand.
#------------------------------------------------------------
import urlparse,urllib2,urllib,re
import os, sys
from core import logger
from core import config
from core import scrapertools
from core.item import Item
from servers import servertools
__channel__ = "cinestreaming01"
__category__ = "F"
__type__ = "generic"
__title__ = "Cinestreaming01"
__language__ = "IT"
DEBUG = config.get_setting("debug")
sito="http://www.cinestreaming01.com"
def isGeneric():
return True
def mainlist(item):
logger.info("streamondemand.cinestreaming01 mainlist")
itemlist = []
itemlist.append( Item(channel=__channel__, title="[COLOR azure]Ultimi Film Inseriti[/COLOR]", action="peliculas", url=sito, thumbnail="http://dc584.4shared.com/img/XImgcB94/s7/13feaf0b538/saquinho_de_pipoca_01"))
itemlist.append( Item(channel=__channel__, title="[COLOR azure]Film Per Categoria[/COLOR]", action="categorias", url=sito, thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png"))
itemlist.append( Item(channel=__channel__, title="[COLOR yellow]Cerca...[/COLOR]", action="search", thumbnail="http://dc467.4shared.com/img/fEbJqOum/s7/13feaf0c8c0/Search"))
return itemlist
def categorias(item):
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
bloque = scrapertools.get_match(data,'<ul class="main-menu clearfix">(.*?)</ul>')
    # Extract the entries (folders)
patron = '<li><a href="(.*?)">(.*?)</a></li>'
matches = re.compile(patron,re.DOTALL).findall(bloque)
scrapertools.printMatches(matches)
for scrapedurl,scrapedtitle in matches:
scrapedplot = ""
scrapedthumbnail = ""
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"]")
itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR azure]"+scrapedtitle+"[/COLOR]" , url=sito+scrapedurl , thumbnail="http://xbmc-repo-ackbarr.googlecode.com/svn/trunk/dev/skin.cirrus%20extended%20v2/extras/moviegenres/All%20Movies%20by%20Genre.png", folder=True) )
return itemlist
def search(item,texto):
logger.info("[cinestreaming01.py] "+item.url+" search "+texto)
item.url = "http://cinestreaming01.com/?s="+texto
try:
return peliculas(item)
    # Catch the exception so a failing channel does not break the global search
except:
import sys
for line in sys.exc_info():
logger.error( "%s" % line )
return []
def peliculas(item):
logger.info("streamondemand.cinestreaming01 peliculas")
itemlist = []
    # Download the page
data = scrapertools.cache_page(item.url)
    # Extract the entries (folders)
#patron = '<div class="boxim">\s*'
    patron = '<div class="box " id="post-.*?">.*?<a href="(.*?)"><img class="boximg" src="http://cinestreaming01.com/wp-content/themes/Boxoffice/timthumb.php\?src=(.*?)&h=270&w=180&zc=1" alt=""/></a>\s*'
patron += '<h2><a href=".*?" rel="bookmark" title=".*?">(.*?)</a></h2>'
matches = re.compile(patron,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        scrapedplot = ""
        #scrapedtitle=scrapertools.decodeHtmlentities(scrapedtitle.replace("Streaming e download ita ",""))
if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
itemlist.append( Item(channel=__channel__, action="findvideos", title="[COLOR azure]"+scrapedtitle+"[/COLOR]" , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=True) )
    # Extract the pager
patronvideos = '<span class="pnext"><a href="(.*?)">Avanti</a></span>'
matches = re.compile(patronvideos,re.DOTALL).findall(data)
scrapertools.printMatches(matches)
if len(matches)>0:
scrapedurl = urlparse.urljoin(item.url,matches[0])
itemlist.append( Item(channel=__channel__, action="peliculas", title="[COLOR orange]Successivo>>[/COLOR]" , url=scrapedurl , thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png", folder=True) )
return itemlist
| gpl-3.0 | -8,911,400,560,965,957,000 | 43.62 | 299 | 0.6645 | false | 3.235678 | false | false | false |
serge-sans-paille/pythran | pythran/tests/cases/stone.py | 1 | 5387 | #pythran export whetstone(int)
#runas whetstone(2*10**2)
#bench whetstone(1500)
"""
/*
* C Converted Whetstone Double Precision Benchmark
* Version 1.2 22 March 1998
*
* (c) Copyright 1998 Painter Engineering, Inc.
* All Rights Reserved.
*
* Permission is granted to use, duplicate, and
* publish this text and program as long as it
* includes this entire comment block and limited
* rights reference.
*
* Converted by Rich Painter, Painter Engineering, Inc. based on the
* www.netlib.org benchmark/whetstoned version obtained 16 March 1998.
*
* A novel approach was used here to keep the look and feel of the
* FORTRAN version. Altering the FORTRAN-based array indices,
* starting at element 1, to start at element 0 for C, would require
* numerous changes, including decrementing the variable indices by 1.
* Instead, the array E1[] was declared 1 element larger in C. This
* allows the FORTRAN index range to function without any literal or
* variable indices changes. The array element E1[0] is simply never
* used and does not alter the benchmark results.
*
* The major FORTRAN comment blocks were retained to minimize
* differences between versions. Modules N5 and N12, like in the
* FORTRAN version, have been eliminated here.
*
* An optional command-line argument has been provided [-c] to
* offer continuous repetition of the entire benchmark.
* An optional argument for setting an alternate LOOP count is also
* provided. Define PRINTOUT to cause the POUT() function to print
* outputs at various stages. Final timing measurements should be
* made with the PRINTOUT undefined.
*
* Questions and comments may be directed to the author at
* [email protected]
*/
"""
from math import sin as DSIN, cos as DCOS, atan as DATAN, log as DLOG, exp as DEXP, sqrt as DSQRT
def whetstone(loopstart):
# The actual benchmark starts here.
T = .499975;
T1 = 0.50025;
T2 = 2.0;
# With loopcount LOOP=10, one million Whetstone instructions
# will be executed in EACH MAJOR LOOP..A MAJOR LOOP IS EXECUTED
# 'II' TIMES TO INCREASE WALL-CLOCK TIMING ACCURACY.
LOOP = loopstart;
II = 1;
JJ = 1;
while JJ <= II:
N1 = 0;
N2 = 12 * LOOP;
N3 = 14 * LOOP;
N4 = 345 * LOOP;
N6 = 210 * LOOP;
N7 = 32 * LOOP;
N8 = 899 * LOOP;
N9 = 616 * LOOP;
N10 = 0;
N11 = 93 * LOOP;
# Module 1: Simple identifiers
X1 = 1.0;
X2 = -1.0;
X3 = -1.0;
X4 = -1.0;
for I in range(1,N1+1):
X1 = (X1 + X2 + X3 - X4) * T;
X2 = (X1 + X2 - X3 + X4) * T;
X3 = (X1 - X2 + X3 + X4) * T;
X4 = (-X1+ X2 + X3 + X4) * T;
# Module 2: Array elements
E1 = [ 1.0, -1.0, -1.0, -1.0 ]
for I in range(1,N2+1):
E1[0] = ( E1[0] + E1[1] + E1[2] - E1[3]) * T;
E1[1] = ( E1[0] + E1[1] - E1[2] + E1[3]) * T;
E1[2] = ( E1[0] - E1[1] + E1[2] + E1[3]) * T;
E1[3] = (-E1[0] + E1[1] + E1[2] + E1[3]) * T;
# Module 3: Array as parameter
for I in range(1,N3+1):
PA(E1, T, T2);
# Module 4: Conditional jumps
J = 1;
for I in range(1,N4+1):
if J == 1:
J = 2;
else:
J = 3;
if J > 2:
J = 0;
else:
J = 1;
if J < 1:
J = 1;
else:
J = 0;
# Module 5: Omitted
# Module 6: Integer arithmetic
J = 1;
K = 2;
L = 3;
for I in range(1,N6+1):
J = J * (K-J) * (L-K);
K = L * K - (L-J) * K;
L = (L-K) * (K+J);
E1[L-2] = J + K + L;
E1[K-2] = J * K * L;
# Module 7: Trigonometric functions
X = 0.5;
Y = 0.5;
for I in range(1,N7+1):
X = T * DATAN(T2*DSIN(X)*DCOS(X)/(DCOS(X+Y)+DCOS(X-Y)-1.0));
Y = T * DATAN(T2*DSIN(Y)*DCOS(Y)/(DCOS(X+Y)+DCOS(X-Y)-1.0));
# Module 8: Procedure calls
X = 1.0;
Y = 1.0;
Z = 1.0;
for I in range(1,N8+1):
Z=P3(X,Y,T, T2)
# Module 9: Array references
J = 1;
K = 2;
L = 3;
E1[0] = 1.0;
E1[1] = 2.0;
E1[2] = 3.0;
for I in range(1,N9+1):
P0(E1, J, K, L)
# Module 10: Integer arithmetic
J = 2;
K = 3;
for I in range(1,N10+1):
J = J + K;
K = J + K;
J = K - J;
K = K - J - J;
# Module 11: Standard functions
X = 0.75;
for I in range(1,N11+1):
X = DSQRT(DEXP(DLOG(X)/T1));
JJ+=1
KIP = (100.0*LOOP*II)
return KIP
def PA(E, T, T2):
J = 0;
while J<6:
E[0] = ( E[0] + E[1] + E[2] - E[3]) * T;
E[1] = ( E[0] + E[1] - E[2] + E[3]) * T;
E[2] = ( E[0] - E[1] + E[2] + E[3]) * T;
E[3] = (-E[0] + E[1] + E[2] + E[3]) / T2;
J += 1;
def P0(E1, J, K, L):
E1[J-1] = E1[K-1];
E1[K-1] = E1[L-1];
E1[L-1] = E1[J-1];
def P3(X, Y, T, T2):
X1 = X;
Y1 = Y;
X1 = T * (X1 + Y1);
Y1 = T * (X1 + Y1);
return (X1 + Y1) / T2;
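# Illustrative call (values taken from the #runas directive above): whetstone(200)
# performs one major loop with LOOP=200 and returns KIP = 100.0 * LOOP * II,
# i.e. 20000.0 for this example.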
| bsd-3-clause | 8,200,889,701,506,693,000 | 24.899038 | 97 | 0.484685 | false | 2.75409 | false | false | false |
Socialsquare/RunningCause | challenges/tasks.py | 1 | 1414 | # coding: utf8
from __future__ import absolute_import
import datetime
from celery import shared_task
from celery.utils.log import get_task_logger
from django.utils.translation import ugettext as _
from django.core.mail import send_mail
from django.contrib.auth import get_user_model
from django.conf import settings
from django.template import loader, Context
from common.helpers import send_email
from .models import Challenge
log = get_task_logger(__name__)
def send_challenge_reminder(user_id):
user = get_user_model().objects.get(id=user_id)
today = datetime.date.today()
filters = {
'status': Challenge.ACTIVE,
'end_date': today
}
ending_challenges = user.challenges_recieved.filter(**filters)
email_subject = _('Challenge ends today!')
email_context = {
'ending_challenges': ending_challenges
}
send_email([user.email],
email_subject,
'challenges/emails/challenges_reminder.html',
email_context)
@shared_task(ignore_result=True)
def send_challenge_reminders():
    # Fetch runners that have challenges ending today.
today = datetime.date.today()
filters = {
'is_active': True,
'challenges_recieved__end_date': today
}
relevant_runners = get_user_model().objects.filter(**filters)
for runner in relevant_runners:
send_challenge_reminder(runner.id)
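# A minimal scheduling sketch (assumption: periodic execution is wired up via
# celery beat; the entry name and timing below are made up, only the task path
# comes from this module):
#
#   CELERYBEAT_SCHEDULE = {
#       'send-challenge-reminders': {
#           'task': 'challenges.tasks.send_challenge_reminders',
#           'schedule': crontab(hour=8, minute=0),
#       },
#   }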
| mit | -5,044,719,315,472,939,000 | 25.679245 | 66 | 0.683876 | false | 3.750663 | false | false | false |
JTarball/tetherbox | docker/app/app/backend/apps/services/migrations/0001_initial.py | 1 | 3463 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0002_auto_20150616_2121'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Action',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('action_id', models.IntegerField()),
],
),
migrations.CreateModel(
name='Service',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(unique=True, max_length=255)),
('description', models.CharField(max_length=255)),
('status', models.SmallIntegerField(default=0, choices=[(0, b'Disabled'), (1, b'Coming Soon'), (2, b'Beta'), (3, b'Enabled')])),
],
),
migrations.CreateModel(
name='Tether',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('enabled', models.BooleanField(default=True, help_text='Designates whether the a web trigger-action is enabled.')),
('actions', models.ManyToManyField(to='services.Action')),
('tags', taggit.managers.TaggableManager(to='taggit.Tag', through='taggit.TaggedItem', blank=True, help_text='A comma-separated list of tags.', verbose_name='Tags')),
],
),
migrations.CreateModel(
name='Trigger',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('description', models.CharField(max_length=255)),
('trigger_id', models.IntegerField()),
('service', models.ForeignKey(to='services.Service')),
],
),
migrations.CreateModel(
name='UserService',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('token', models.CharField(max_length=255)),
('service', models.ForeignKey(related_name='+', to='services.Service', to_field=b'name')),
('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='tether',
name='trigger',
field=models.ForeignKey(to='services.Trigger'),
),
migrations.AddField(
model_name='tether',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL),
),
migrations.AddField(
model_name='action',
name='service',
field=models.ForeignKey(to='services.Service'),
),
]
| isc | -504,606,766,729,863,940 | 42.2875 | 182 | 0.556743 | false | 4.405852 | false | false | false |
david-martin/atomic-reactor | tests/test_inner.py | 1 | 20378 | """
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from atomic_reactor.build import InsideBuilder
from atomic_reactor.util import ImageName
from atomic_reactor.plugin import (PreBuildPlugin, PrePublishPlugin, PostBuildPlugin, ExitPlugin,
AutoRebuildCanceledException)
from atomic_reactor.plugin import PluginFailedException
import atomic_reactor.plugin
import logging
from flexmock import flexmock
import pytest
from tests.constants import MOCK_SOURCE, SOURCE
from tests.docker_mock import mock_docker
import inspect
from atomic_reactor.inner import BuildResults, BuildResultsEncoder, BuildResultsJSONDecoder
from atomic_reactor.inner import DockerBuildWorkflow
BUILD_RESULTS_ATTRS = ['build_logs',
'built_img_inspect',
'built_img_info',
'base_img_info',
'base_plugins_output',
'built_img_plugins_output']
def test_build_results_encoder():
results = BuildResults()
expected_data = {}
for attr in BUILD_RESULTS_ATTRS:
setattr(results, attr, attr)
expected_data[attr] = attr
data = json.loads(json.dumps(results, cls=BuildResultsEncoder))
assert data == expected_data
def test_build_results_decoder():
data = {}
expected_results = BuildResults()
for attr in BUILD_RESULTS_ATTRS:
setattr(expected_results, attr, attr)
data[attr] = attr
results = json.loads(json.dumps(data), cls=BuildResultsJSONDecoder)
for attr in set(BUILD_RESULTS_ATTRS) - set(['build_logs']):
assert getattr(results, attr) == getattr(expected_results, attr)
class MockDockerTasker(object):
def inspect_image(self, name):
return {}
class X(object):
pass
class MockInsideBuilder(object):
def __init__(self, failed=False):
self.tasker = MockDockerTasker()
self.base_image = ImageName(repo='Fedora', tag='22')
self.image_id = 'asd'
self.failed = failed
@property
def source(self):
result = X()
setattr(result, 'dockerfile_path', '/')
setattr(result, 'path', '/tmp')
return result
def pull_base_image(self, source_registry, insecure=False):
pass
def build(self):
result = X()
setattr(result, 'logs', None)
setattr(result, 'is_failed', lambda: self.failed)
return result
def inspect_built_image(self):
return None
class RaisesMixIn(object):
"""
Mix-in class for plugins that should raise exceptions.
"""
is_allowed_to_fail = False
def __init__(self, tasker, workflow, *args, **kwargs):
super(RaisesMixIn, self).__init__(tasker, workflow,
*args, **kwargs)
def run(self):
raise RuntimeError
class PreRaises(RaisesMixIn, PreBuildPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'pre_raises'
class PostRaises(RaisesMixIn, PostBuildPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'post_raises'
class PrePubRaises(RaisesMixIn, PrePublishPlugin):
"""
This plugin must run and cause the build to abort.
"""
key = 'prepub_raises'
class WatchedMixIn(object):
"""
Mix-in class for plugins we want to watch.
"""
def __init__(self, tasker, workflow, watcher, *args, **kwargs):
super(WatchedMixIn, self).__init__(tasker, workflow,
*args, **kwargs)
self.watcher = watcher
def run(self):
self.watcher.call()
class PreWatched(WatchedMixIn, PreBuildPlugin):
"""
A PreBuild plugin we can watch.
"""
key = 'pre_watched'
class PrePubWatched(WatchedMixIn, PrePublishPlugin):
"""
A PrePublish plugin we can watch.
"""
key = 'prepub_watched'
class PostWatched(WatchedMixIn, PostBuildPlugin):
"""
A PostBuild plugin we can watch.
"""
key = 'post_watched'
class ExitWatched(WatchedMixIn, ExitPlugin):
"""
An Exit plugin we can watch.
"""
key = 'exit_watched'
class ExitRaises(RaisesMixIn, ExitPlugin):
"""
An Exit plugin that should raise an exception.
"""
key = 'exit_raises'
class ExitCompat(WatchedMixIn, ExitPlugin):
"""
An Exit plugin called as a Post-build plugin.
"""
key = 'store_logs_to_file'
class Watcher(object):
def __init__(self):
self.called = False
def call(self):
self.called = True
def was_called(self):
return self.called
def test_workflow():
"""
Test normal workflow.
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_pre = Watcher()
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=[{'name': 'pre_watched',
'args': {
'watcher': watch_pre
}}],
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
workflow.build_docker_image()
assert watch_pre.was_called()
assert watch_prepub.was_called()
assert watch_post.was_called()
assert watch_exit.was_called()
class FakeLogger(object):
def __init__(self):
self.debugs = []
self.infos = []
self.warnings = []
self.errors = []
def log(self, logs, args):
logs.append(args)
def debug(self, *args):
self.log(self.debugs, args)
def info(self, *args):
self.log(self.infos, args)
def warning(self, *args):
self.log(self.warnings, args)
def error(self, *args):
self.log(self.errors, args)
def test_workflow_compat():
"""
Some of our plugins have changed from being run post-build to
being run at exit. Let's test what happens when we try running an
exit plugin as a post-build plugin.
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_exit = Watcher()
fake_logger = FakeLogger()
atomic_reactor.plugin.logger = fake_logger
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
postbuild_plugins=[{'name': 'store_logs_to_file',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
workflow.build_docker_image()
assert watch_exit.was_called()
assert len(fake_logger.errors) > 0
class Pre(PreBuildPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'pre'
class Post(PostBuildPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'post'
class Exit(ExitPlugin):
"""
This plugin does nothing. It's only used for configuration testing.
"""
key = 'exit'
@pytest.mark.parametrize(('plugins', 'should_fail', 'should_log'), [
# No 'name' key, prebuild
({
'prebuild_plugins': [{'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, postbuild
({
'postbuild_plugins': [{'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}],
},
True, # is fatal
True, # logs error
),
# No 'name' key, exit
({
'exit_plugins': [{'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}],
},
False, # not fatal
True, # logs error
),
# No 'args' key, prebuild
({'prebuild_plugins': [{'name': 'pre'},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No 'args' key, postbuild
({'postbuild_plugins': [{'name': 'post'},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal,
False, # no error logged
),
# No 'args' key, exit
({'exit_plugins': [{'name': 'exit'},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
False, # no error logged
),
# No such plugin, prebuild
({'prebuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'pre_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, postbuild
({'postbuild_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'post_watched',
'args': {
'watcher': Watcher(),
}
}]},
True, # is fatal
True, # logs error
),
# No such plugin, exit
({'exit_plugins': [{'name': 'no plugin',
'args': {}},
{'name': 'exit_watched',
'args': {
'watcher': Watcher(),
}
}]},
False, # not fatal
True, # logs error
),
])
def test_plugin_errors(plugins, should_fail, should_log):
"""
Try bad plugin configuration.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
fake_logger = FakeLogger()
atomic_reactor.plugin.logger = fake_logger
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
plugin_files=[this_file],
**plugins)
# Find the 'watcher' parameter
watchers = [conf.get('args', {}).get('watcher')
for plugin in plugins.values()
for conf in plugin]
watcher = [x for x in watchers if x][0]
if should_fail:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
assert not watcher.was_called()
else:
workflow.build_docker_image()
assert watcher.was_called()
if should_log:
assert len(fake_logger.errors) > 0
else:
assert len(fake_logger.errors) == 0
class StopAutorebuildPlugin(PreBuildPlugin):
key = 'stopstopstop'
def run(self):
raise AutoRebuildCanceledException(self.key, 'message')
def test_autorebuild_stop_prevents_build():
"""
    test that a plugin that raises AutoRebuildCanceledException results in the build actually being skipped
"""
this_file = inspect.getfile(PreWatched)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=[{'name': 'stopstopstop',
'args': {
}}],
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
with pytest.raises(AutoRebuildCanceledException):
workflow.build_docker_image()
assert not watch_prepub.was_called()
assert not watch_post.was_called()
assert watch_exit.was_called()
    assert workflow.autorebuild_canceled is True
@pytest.mark.parametrize('fail_at', ['pre', 'prepub', 'post', 'exit'])
def test_workflow_plugin_error(fail_at):
"""
This is a test for what happens when plugins fail.
When a prebuild or postbuild plugin fails, and doesn't have
is_allowed_to_fail=True set, the whole build should fail.
However, all the exit plugins should run.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_pre = Watcher()
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
prebuild_plugins = [{'name': 'pre_watched',
'args': {
'watcher': watch_pre,
}}]
prepublish_plugins = [{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}]
postbuild_plugins = [{'name': 'post_watched',
'args': {
'watcher': watch_post
}}]
exit_plugins = [{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}]
# Insert a failing plugin into one of the build phases
if fail_at == 'pre':
prebuild_plugins.insert(0, {'name': 'pre_raises', 'args': {}})
elif fail_at == 'prepub':
prepublish_plugins.insert(0, {'name': 'prepub_raises', 'args': {}})
elif fail_at == 'post':
postbuild_plugins.insert(0, {'name': 'post_raises', 'args': {}})
elif fail_at == 'exit':
exit_plugins.insert(0, {'name': 'exit_raises', 'args': {}})
else:
# Typo in the parameter list?
assert False
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prebuild_plugins=prebuild_plugins,
prepublish_plugins=prepublish_plugins,
postbuild_plugins=postbuild_plugins,
exit_plugins=exit_plugins,
plugin_files=[this_file])
# Failures in any phase except 'exit' cause the build process to
# abort.
if fail_at == 'exit':
workflow.build_docker_image()
else:
with pytest.raises(PluginFailedException):
workflow.build_docker_image()
# The pre-build phase should only complete if there were no
# earlier plugin failures.
assert watch_pre.was_called() == (fail_at != 'pre')
# The prepublish phase should only complete if there were no
# earlier plugin failures.
assert watch_prepub.was_called() == (fail_at not in ('pre', 'prepub'))
# The post-build phase should only complete if there were no
# earlier plugin failures.
assert watch_post.was_called() == (fail_at not in ('pre', 'prepub', 'post'))
# But all exit plugins should run, even if one of them also raises
# an exception.
assert watch_exit.was_called()
def test_workflow_docker_build_error():
"""
This is a test for what happens when the docker build fails.
"""
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder(failed=True)
flexmock(InsideBuilder).new_instances(fake_builder)
watch_prepub = Watcher()
watch_post = Watcher()
watch_exit = Watcher()
workflow = DockerBuildWorkflow(MOCK_SOURCE, 'test-image',
prepublish_plugins=[{'name': 'prepub_watched',
'args': {
'watcher': watch_prepub,
}}],
postbuild_plugins=[{'name': 'post_watched',
'args': {
'watcher': watch_post
}}],
exit_plugins=[{'name': 'exit_watched',
'args': {
'watcher': watch_exit
}}],
plugin_files=[this_file])
assert workflow.build_docker_image().is_failed()
# No subsequent build phases should have run except 'exit'
assert not watch_prepub.was_called()
assert not watch_post.was_called()
assert watch_exit.was_called()
class ExitUsesSource(ExitWatched):
key = 'uses_source'
def run(self):
assert os.path.exists(self.workflow.source.get_dockerfile_path()[0])
WatchedMixIn.run(self)
def test_source_not_removed_for_exit_plugins():
this_file = inspect.getfile(PreRaises)
mock_docker()
fake_builder = MockInsideBuilder()
flexmock(InsideBuilder).new_instances(fake_builder)
watch_exit = Watcher()
workflow = DockerBuildWorkflow(SOURCE, 'test-image',
exit_plugins=[{'name': 'uses_source',
'args': {
'watcher': watch_exit,
}}],
plugin_files=[this_file])
workflow.build_docker_image()
# Make sure that the plugin was actually run
assert watch_exit.was_called()
| bsd-3-clause | -4,677,298,146,013,047,000 | 30.159021 | 97 | 0.48464 | false | 4.598962 | true | false | false |
migasfree/migasfree-backend | migasfree/client/models/error.py | 1 | 4593 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2021 Jose Antonio Chavarría <[email protected]>
# Copyright (c) 2015-2021 Alberto Gacías <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.db import models
from django.db.models.aggregates import Count
from django.utils.translation import gettext_lazy as _
from ...core.models import Project
from .computer import Computer
from .event import Event
class DomainErrorManager(models.Manager):
def get_queryset(self):
return super().get_queryset().select_related(
'project',
'computer',
'computer__project',
'computer__sync_user',
)
def scope(self, user):
qs = self.get_queryset()
if not user.is_view_all():
qs = qs.filter(
project_id__in=user.get_projects(),
computer_id__in=user.get_computers()
)
return qs
class UncheckedManager(DomainErrorManager):
def get_queryset(self):
return super().get_queryset().filter(checked=0)
def scope(self, user):
return super().scope(user).filter(checked=0)
class ErrorManager(DomainErrorManager):
def create(self, computer, project, description):
obj = Error()
obj.computer = computer
obj.project = project
obj.description = description
obj.save()
return obj
class Error(Event):
description = models.TextField(
verbose_name=_("description"),
null=True,
blank=True
)
checked = models.BooleanField(
verbose_name=_("checked"),
default=False,
)
project = models.ForeignKey(
Project,
on_delete=models.CASCADE,
verbose_name=_("project")
)
objects = ErrorManager()
unchecked = UncheckedManager()
@staticmethod
def unchecked_count(user=None):
if not user:
return Error.unchecked.count()
return Error.unchecked.scope(user).count()
@staticmethod
def unchecked_by_project(user):
total = Error.unchecked_count(user)
projects = list(Error.unchecked.scope(user).values(
'project__name',
'project__id',
'project__platform__id',
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
platforms = list(Error.unchecked.scope(user).values(
'project__platform__id',
'project__platform__name'
).annotate(
count=Count('id')
).order_by('project__platform__id', '-count'))
return {
'total': total,
'inner': platforms,
'outer': projects,
}
@staticmethod
def status_by_project(user):
total = Error.objects.scope(user).count()
projects = list(Error.objects.scope(user).values(
'computer__status',
'project__id',
'project__name',
).annotate(
count=Count('id')
).order_by('computer__status', '-count'))
status = list(Error.objects.scope(user).values(
'computer__status',
).annotate(
count=Count('id')
).order_by('computer__status', '-count'))
for item in status:
item['status'] = item.get('computer__status')
item['computer__status'] = _(dict(Computer.STATUS_CHOICES)[item.get('computer__status')])
return {
'total': total,
'inner': status,
'outer': projects,
}
def checked_ok(self):
self.checked = True
self.save()
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
self.description = self.description.replace("\r\n", "\n")
super().save(force_insert, force_update, using, update_fields)
class Meta:
app_label = 'client'
verbose_name = _('Error')
verbose_name_plural = _('Errors')
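# Illustrative usage sketch (an assumption, not part of the original module):
# `computer` and `project` are assumed to be existing model instances.
#
#     error = Error.objects.create(computer, project, 'package install failed')
#     error.checked_ok()        # mark the error as reviewed
#     Error.unchecked_count()   # number of errors still awaiting review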
| gpl-3.0 | 349,263,582,367,790,340 | 27.515528 | 101 | 0.597691 | false | 4.110116 | false | false | false |
stscieisenhamer/glue | glue/utils/tests/test_matplotlib.py | 2 | 5186 |
from __future__ import absolute_import, division, print_function
import pytest
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
from matplotlib.artist import Artist
from numpy.testing import assert_allclose
from matplotlib.backends.backend_agg import FigureCanvasAgg
from glue.tests.helpers import requires_scipy
from glue.utils.misc import DeferredMethod
from ..matplotlib import (point_contour, fast_limits, all_artists, new_artists,
remove_artists, view_cascade, get_extent, color2rgb,
defer_draw, freeze_margins)
@requires_scipy
class TestPointContour(object):
def test(self):
data = np.array([[0, 0, 0, 0],
[0, 2, 3, 0],
[0, 4, 2, 0],
[0, 0, 0, 0]])
xy = point_contour(2, 2, data)
x = np.array([2., 2. + 1. / 3., 2., 2., 1, .5, 1, 1, 2])
y = np.array([2. / 3., 1., 2., 2., 2.5, 2., 1., 1., 2. / 3])
np.testing.assert_array_almost_equal(xy[:, 0], x)
np.testing.assert_array_almost_equal(xy[:, 1], y)
def test_fast_limits_nans():
x = np.zeros((10, 10)) * np.nan
assert_allclose(fast_limits(x, 0, 1), [0, 1])
def test_single_value():
x = np.array([1])
assert_allclose(fast_limits(x, 5., 95.), [1, 1])
def test_artist_functions():
c1 = Circle((0, 0), radius=1)
c2 = Circle((1, 0), radius=1)
c3 = Circle((2, 0), radius=1)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.add_patch(c1)
ax.add_patch(c2)
assert all_artists(fig) == set([c1, c2])
ax.add_patch(c3)
assert new_artists(fig, set([c1, c2])) == set([c3])
remove_artists([c2])
assert all_artists(fig) == set([c1, c3])
# check that it can deal with being passed the same artist twice
remove_artists([c1, c1])
assert all_artists(fig) == set([c3])
def test_get_extent():
assert get_extent((slice(0, 5, 1), slice(0, 10, 2))) == (0, 10, 0, 5)
assert get_extent((slice(0, 5, 1), slice(0, 10, 2)), transpose=True) == (0, 5, 0, 10)
def test_view_cascade():
data = np.zeros((100, 100))
v2, view = view_cascade(data, (slice(0, 5, 1), slice(0, 5, 1)))
assert v2 == ((slice(0, 100, 20), slice(0, 100, 20)))
assert view == (slice(0, 5, 1), slice(0, 5, 1))
v2, view = view_cascade(data, (3, slice(0, 5, 1)))
assert v2 == ((3, slice(0, 100, 20)))
assert view == (3, slice(0, 5, 1))
def test_defer_draw():
@defer_draw
def draw_figure():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [4, 5, 6])
fig.canvas.draw()
return 3.5
result = draw_figure()
# Make sure that the return value was passed through correctly
assert result == 3.5
def test_defer_draw_exception():
# Regression test for a bug that meant that if an exception happened during
# drawing, the draw method was not restored correctly
# Make sure we start off with a clean draw method
assert not isinstance(FigureCanvasAgg.draw, DeferredMethod)
class ProblematicArtist(Artist):
def draw(self, *args, **kwargs):
raise ValueError('You shall not pass!')
@defer_draw
def draw_figure():
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.add_artist(ProblematicArtist())
fig.canvas.draw()
with pytest.raises(ValueError) as exc:
result = draw_figure()
assert exc.value.args[0] == 'You shall not pass!'
# Make sure that draw is no longer a deferred method
assert not isinstance(FigureCanvasAgg.draw, DeferredMethod)
@pytest.mark.parametrize(('color', 'rgb'),
(('red', (1, 0, 0)), ('green', (0, 0.5020, 0)), ('orange', (1., 0.6470, 0.))))
def test_color2rgb(color, rgb):
assert_allclose(color2rgb(color), rgb, atol=0.001)
def test_freeze_margins():
fig = plt.figure(figsize=(4, 4))
ax = fig.add_subplot(1, 1, 1)
freeze_margins(ax, margins=[1, 1, 1, 1])
# Note, we don't test the following since the defaults change depending
# on the Matplotlib version
# bbox = ax.get_position()
# np.testing.assert_allclose(bbox.x0, 0.125)
# np.testing.assert_allclose(bbox.y0, 0.1)
# np.testing.assert_allclose(bbox.x1, 0.9)
# np.testing.assert_allclose(bbox.y1, 0.9)
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.25)
np.testing.assert_allclose(bbox.y0, 0.25)
np.testing.assert_allclose(bbox.x1, 0.75)
np.testing.assert_allclose(bbox.y1, 0.75)
fig.set_size_inches(8, 8)
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.125)
np.testing.assert_allclose(bbox.y0, 0.125)
np.testing.assert_allclose(bbox.x1, 0.875)
np.testing.assert_allclose(bbox.y1, 0.875)
ax.resizer.margins = [0, 1, 2, 4]
fig.canvas.resize_event()
bbox = ax.get_position()
np.testing.assert_allclose(bbox.x0, 0.)
np.testing.assert_allclose(bbox.y0, 0.25)
np.testing.assert_allclose(bbox.x1, 0.875)
np.testing.assert_allclose(bbox.y1, 0.5)
| bsd-3-clause | 2,965,087,516,347,311,000 | 28.299435 | 103 | 0.604126 | false | 3.047004 | true | false | false |
wooey/Wooey | wooey/migrations/0018_userfile.py | 1 | 1037 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wooey.models.mixins
class Migration(migrations.Migration):
dependencies = [
('wooey', '0017_wooeyfile_generate_checksums'),
]
operations = [
migrations.CreateModel(
name='UserFile',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('filename', models.TextField()),
('job', models.ForeignKey(to='wooey.WooeyJob', on_delete=models.CASCADE)),
('parameter', models.ForeignKey(blank=True, to='wooey.ScriptParameters', null=True, on_delete=models.CASCADE)),
],
bases=(wooey.models.mixins.WooeyPy2Mixin, models.Model),
),
migrations.AddField(
model_name='userfile',
name='system_file',
field=models.ForeignKey(to='wooey.WooeyFile', on_delete=models.CASCADE),
),
]
| bsd-3-clause | 1,396,863,322,107,834,600 | 33.566667 | 127 | 0.594986 | false | 3.942966 | false | false | false |
jfalkner/report_data | report_data/check.py | 1 | 3654 | from datetime import datetime
from datetime import timedelta
from datetime import date
class SanityChecker:
"""Validate input from URL arguments.
This helps keep code that needs to check input succinct and consistent
across the various reports. The main purpose for encapsulating these
methods is so that errors can optionally be thrown immediately or buffered
and thrown in aggregate. The latter option is default because it is often
most helpful to see the full list of issues all at once versus, for
example, showing the first error, having a user fix it, then repeating
the cycle for all other errors hidden by the first.
"""
def __init__(self):
self.errors = []
self.raise_error = False
def reset(self):
del self.errors[:]
def if_any_errors_raise_aggregate(self):
if not self.errors:
return
error_count = len(self.errors)
error_message = ""
if error_count > 1:
error_message += '%s values need to be corrected.' % error_count
raise AssertionError('%s\n\n%s' % (error_message,
',\n'.join(self.errors)))
def add_or_raise(self, error_message):
"""Either buffer or immediately raise a filter value error."""
if self.raise_error:
raise AssertionError(error_message)
else:
if not isinstance(error_message, basestring):
raise ValueError('Error messages must be strings.')
self.errors.append(error_message)
return None
def if_required(self, name, value, required):
if required and value is None:
self.add_or_raise("Must have value for '%s'." % name)
def date(self, values, name, required=False):
# Check for non-null value.
if name not in values:
if required:
return self.add_or_raise(
'Missing ISO 8601 date value for key "%s". '
'e.g. "2013-06-01" for June 1st 2013.' % name)
else:
return
# Check for expected ISO 8601 format.
try:
date_val = values[name]
if type(date_val) == date:
return date_val
            return datetime.strptime(date_val, '%Y-%m-%d')
except ValueError:
return self.add_or_raise(
'Invalid date value: "%s". Expected ISO 8601 format. '
'e.g. "20130601" for June 1st 2013.')
def date_range(self, start_date, end_date):
# If no range, default to two full weeks plus days from this week.
if not start_date and not end_date:
now = datetime.now()
start_day = datetime.now() - timedelta(days=14)
while start_day.weekday() != 0:
start_day -= timedelta(days=1)
return (start_day, now)
# Sanity check both start and end dates exist.
if (start_date and not end_date):
self.add_or_raise('Found a start date but no end. Must have both '
'if you are specifying a date range.')
if (not start_date and end_date):
            self.add_or_raise('Found an end date but no start. Must have both '
'if you are specifying a date range.')
# Sanity check that the start is before end date.
if start_date and end_date:
if end_date <= start_date:
self.add_or_raise(
'End date "%s" must be after start date "%s".' %
(start_date, end_date))
return start_date, end_date
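# Illustrative usage sketch (an assumption, not part of the original module):
# buffer every problem found while validating URL arguments, then raise them
# all at once. `params` stands for a dict of parsed URL arguments.
#
#     check = SanityChecker()
#     start = check.date(params, 'start', required=True)
#     end = check.date(params, 'end', required=True)
#     start, end = check.date_range(start, end)
#     check.if_any_errors_raise_aggregate()   # raises one AssertionError listing every issue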
| mit | 3,428,162,843,007,612,000 | 40.05618 | 78 | 0.574165 | false | 4.344828 | false | false | false |
CMacKinnon101/pokemon-python-api-adapter | get_cards_from_sets.py | 1 | 3637 | #Modules
import configparser
from pokemontcgsdk import Set
from pokemontcgsdk import Card
from pymongo import MongoClient
#Config
Config = configparser.ConfigParser()
Config.read("settings.ini")
host = Config.get("db", "host")
port = Config.get("db", "port")
user = Config.get("db", "user")
password = Config.get("db", "password")
pass_colon_str = ""
at_str = ""
if user:
pass_colon_str = ":"
at_str = "@"
#Build Connection String
connection_string = "mongodb://{0}{1}{2}{3}{4}:{5}".format(user, pass_colon_str, password, at_str, host, port)
#Get the sets from the pokemontcg api
print("Getting sets from pokemontcgsdk")
pokemontcgapi_sets = Set.all()
print(" Found sets:")
for pokemontcgapi_set in pokemontcgapi_sets:
print(" -- {0}".format(pokemontcgapi_set.name))
#Connect to Mongo
print("Connecting to {0}".format(connection_string))
mongo_client = MongoClient(connection_string)
#Get the Database Object
card_data_database = mongo_client.card_data
sets_collection = card_data_database.sets
cards_collection = card_data_database.cards
#Get all the sets that we already have cards for
print("\nGetting sets from {0}".format(host))
mongo_sets = list(sets_collection.find())  # materialize the cursor so it can be re-scanned for every set
#For each card, insert a document into mongo
print("\nInserting Cards into mongo")
for pokemontcgapi_set in pokemontcgapi_sets:
already_have_set = False
print("Checking for {0}({1})".format(pokemontcgapi_set.name, pokemontcgapi_set.code))
    for mongo_set in mongo_sets:
if mongo_set.get('code') == pokemontcgapi_set.code:
already_have_set = True
print("Skipping {0}({1})".format(mongo_set.get('name'), mongo_set.get('code')))
break
if not already_have_set:
print("\nInserting {0}:".format(pokemontcgapi_set.name))
print("***********************************")
#Get the cards from the set
cards = Card.where(setCode=pokemontcgapi_set.code).all()
#Insert each card document into mongo
for card in cards:
print("-- {0}({1})".format(card.name, card.id))
cards_collection.insert_one({
"pokemontcgapi_id": card.id,
"name": card.name,
"national_pokedex_number": card.national_pokedex_number,
"image_url": card.image_url,
"subtype": card.subtype,
"supertype": card.supertype,
"ability": card.ability,
"ancient_trait": card.ancient_trait,
"hp": card.hp,
"number": card.number,
"artist": card.artist,
"rarity": card.rarity,
"series": card.series,
"set": card.set,
"set_code": card.set_code,
"retreat_cost": card.retreat_cost,
"text": card.text,
"types": card.types,
"attacks": card.attacks,
"weaknesses": card.weaknesses,
"resistances": card.resistances
})
sets_collection.insert_one({
"code": pokemontcgapi_set.code,
"name": pokemontcgapi_set.name,
"series": pokemontcgapi_set.series,
"total_cards": pokemontcgapi_set.total_cards,
"standard_legal": pokemontcgapi_set.standard_legal,
"expanded_legal": pokemontcgapi_set.expanded_legal,
"release_date": pokemontcgapi_set.release_date
})
print("Finished inserting {0}({1})\n\n".format(pokemontcgapi_set.name, pokemontcgapi_set.code))
print("\nClosing connection to {0}".format(host))
mongo_client.close() | mit | 3,130,499,382,758,920,000 | 35.38 | 110 | 0.606819 | false | 3.405431 | true | false | false |
michaldz44/pyG-Attract | golem.py | 1 | 3778 | import math
import pdb
class Golem(object):
def __init__(self, x, y, args, attractors,golem_number):
self.attractors=attractors
self.args=args
self.position=complex(x,y)
self.velocity=complex(0,0)
#self.acceleration_previous=self.attractors.get_force(self.position,self.velocity)
self.acceleration_previous=0
self.final_attractor=None
self.energy=self.get_energy()
self.golem_number=golem_number
self.state=[]
def move(self):
        # step size: use an adaptive time step, shrinking dt when the golem moves fast
absv=abs(self.velocity)
if absv>1:
dt=self.args.dt*1/(absv)
else:
dt=self.args.dt
acceleration_current=self.attractors.get_force(self.position,self.velocity)
# let's ty to be accurate apply Beeman-Schofield algoritm
#
# position=\
# self.position+\
# self.velocity*dt+\
# dt*dt*(4*acceleration_current-self.acceleration_previous)/6.0
#
# v_predict=\
# self.velocity+\
# dt*(3*acceleration_current-self.acceleration_previous)/2.0
#
# acceleration_future=self.attractors.get_force(position,v_predict)
#
# self.velocity+=dt*(2*acceleration_future+5*acceleration_current-self.acceleration_previous)/6.0
#
# self.acceleration_previous=acceleration_current
# self.position=position
# Euler-Cromer fast simplified version
self.velocity+=acceleration_current*dt
self.position+=self.velocity*dt
        # rescale the speed from the energy budget: kinetic energy = E - U, so v = sqrt(2*(E - U))
        if (self.energy-self.attractors.get_potencial(self.position))>0:
v=math.sqrt(2*(self.energy-self.attractors.get_potencial(self.position)))
else:
print("drag problem - velocity anihilated",self.golem_number,abs(self.velocity))
if abs(self.velocity)>0.1:
pdb.set_trace()
v=0.000001
#v=-math.sqrt(-2*(self.energy-self.attractors.get_potencial(self.position)))
absv=abs(self.velocity)
self.velocity=v*self.velocity/absv
#self.q=v/absv
        # drag dissipates energy: dE = -mu * |v|^2 * dt
        self.energy-=dt*self.args.mu*absv*absv
#
# self.state.append((
# abs(self.velocity),
# self.attractors.get_potencial(self.position),
# self.energy,
# dt
# ))
#self.vpredict = self.velocity+ (3.0*self.acceleration0 - self.acceleration1)*dt/2.0
#self.acceleration2 += self.attractors.get_force(self.position,self.vpredict)
#self.acceleration2 += self.position - self.args.mu*self.vpredict
#self.velocity += (2.0*self.acceleration2+5.0*self.acceleration0 - self.acceleration1)*dt/6.0
#self.acceleration1 = self.acceleration0
#self.acceleration0 = self.acceleration2
def get_energy(self):
#print(self.attractors.get_potencial(self.position))
return self.attractors.get_potencial(self.position)+abs(self.velocity)**2/2.0
def do_move(self):
if self.final_attractor:
return False
self.move()
self.end_check()
return True
def get_color(self):
if self.final_attractor:
return self.final_attractor["color"]
def end_check(self):
        # if final attractor is set we are fixed (attracted)
if self.attractors.min_distance(self.position) < self.args.pot_d and abs(self.velocity) < self.args.term_v: # close to the city and low velocity
self.final_attractor=self.attractors.min_attractor(self.position)
return True
if self.energy<self.attractors.min_attractor(self.position)["esc_energy"]:
self.final_attractor=self.attractors.min_attractor(self.position)
return True
return False
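# Illustrative integration loop (a sketch under assumptions, not part of the
# original module): `args` and `attractors` are expected to expose the members
# used above (dt, mu, pot_d, term_v, get_force, get_potencial, min_distance, ...).
#
#     golem = Golem(0.5, 0.5, args, attractors, golem_number=0)
#     while golem.do_move():       # step until the golem is captured by an attractor
#         pass
#     print(golem.get_color())     # color of the attractor that captured it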
| gpl-2.0 | 6,252,138,201,953,294,000 | 35.679612 | 153 | 0.618052 | false | 3.409747 | false | false | false |
KnowledgeLinks/rdfframework | rdfframework/utilities/valuecalculator.py | 1 | 1389 | import datetime
import pytz
# try:
# from rdfframework.utilities import iri, uri
# except ImportError:
# # Try Local Import
# from . import iri, uri
# def calculate_default_value(field):
# '''calculates the default value based on the field default input'''
# _calculation_string = field.get("kds_defaultVal", field.get("defaultVal"))
# _return_val = None
# if _calculation_string is None:
# return None
# if _calculation_string.startswith("item_class"):
# _return_val = iri(uri(field.get("kds_classUri",field.get("classUri"))))
# else:
# _calc_params = _calculation_string.split('+')
# _base = _calc_params[0].strip()
# if len(_calc_params) > 1:
# _add_value = float(_calc_params[1].strip())
# else:
# _add_value = 0
# if _base == 'today':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc).date() +\
# datetime.timedelta(days=_add_value)
# elif _base == 'now':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc) +\
# datetime.timedelta(days=_add_value)
# elif _base == 'time':
# _return_val = datetime.datetime.utcnow().replace(tzinfo = pytz.utc).time() +\
# datetime.timedelta(days=_add_value)
# return _return_val
| mit | -6,869,284,743,283,799,000 | 39.852941 | 91 | 0.572354 | false | 3.481203 | false | false | false |
aliunsal/blog | Blogs/models.py | 1 | 1247 | from django.db import models
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
class Comment(models.Model):
content = models.TextField(null=False)
date = models.DateTimeField(auto_now_add=True)
author = models.ForeignKey(User, default=0)
approved = models.BooleanField(default=False)
activation_key = models.TextField(max_length=150)
email = models.EmailField(null=False)
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
comment = GenericRelation("Comment")
def __unicode__(self):
return self.content
class Post(models.Model):
title = models.CharField(max_length=300)
content = models.TextField(null=True)
date = models.DateTimeField(auto_now_add=True)
picture = models.ImageField(upload_to='static/img/post_image/',
null=True)
author = models.ForeignKey(User)
comment = GenericRelation(Comment)
def __unicode__(self):
return self.title
class Meta:
ordering = ["-id"] | gpl-2.0 | 1,953,466,976,391,067,100 | 31.842105 | 82 | 0.705694 | false | 4.212838 | false | false | false |
botswana-harvard/edc-visit-tracking | edc_visit_tracking/form_validators/visit_form_validator.py | 1 | 4772 | from django import forms
from edc_constants.constants import OTHER, ALIVE, DEAD, YES, UNKNOWN
from edc_constants.constants import PARTICIPANT, NO
from edc_form_validators import FormValidator
from edc_form_validators.base_form_validator import REQUIRED_ERROR,\
INVALID_ERROR
from ..constants import MISSED_VISIT, LOST_VISIT, UNSCHEDULED
from ..visit_sequence import VisitSequence, VisitSequenceError
class VisitFormValidator(FormValidator):
visit_sequence_cls = VisitSequence
participant_label = 'participant'
def clean(self):
appointment = self.cleaned_data.get('appointment')
if not appointment:
raise forms.ValidationError({
'appointment': 'This field is required'},
code=REQUIRED_ERROR)
visit_sequence = self.visit_sequence_cls(appointment=appointment)
try:
visit_sequence.enforce_sequence()
except VisitSequenceError as e:
raise forms.ValidationError(e, code=INVALID_ERROR)
self.validate_visit_code_sequence_and_reason()
self.validate_presence()
self.validate_survival_status_if_alive()
self.validate_reason_and_info_source()
self.validate_required_fields()
def validate_visit_code_sequence_and_reason(self):
appointment = self.cleaned_data.get('appointment')
reason = self.cleaned_data.get('reason')
if appointment:
if (not appointment.visit_code_sequence
and reason == UNSCHEDULED):
raise forms.ValidationError({
'reason': 'Invalid. This is not an unscheduled visit'},
code=INVALID_ERROR)
if (appointment.visit_code_sequence
and reason != UNSCHEDULED):
raise forms.ValidationError({
'reason': 'Invalid. This is an unscheduled visit'},
code=INVALID_ERROR)
def validate_reason_and_info_source(self):
cleaned_data = self.cleaned_data
condition = cleaned_data.get('reason') != MISSED_VISIT
self.required_if_true(
condition,
field_required='info_source',
required_msg='Provide source of information.')
def validate_survival_status_if_alive(self):
cleaned_data = self.cleaned_data
if cleaned_data.get('survival_status') in [ALIVE, DEAD]:
if not cleaned_data.get('last_alive_date'):
raise forms.ValidationError(
{'last_alive_date':
f'Provide date {self.participant_label} last known alive.'})
def validate_presence(self):
"""Raise an exception if 'is_present' does not make sense relative to
'survival status', 'reason' and 'info_source'."""
cleaned_data = self.cleaned_data
if cleaned_data.get('is_present') == YES:
if cleaned_data.get('survival_status') in [UNKNOWN, DEAD]:
raise forms.ValidationError(
{'survival_status':
'Survival status cannot be \'{survival_status}\' if '
'{participant} is present.'.format(
survival_status=cleaned_data.get(
'survival_status').lower(),
participant=self.participant_label)})
if cleaned_data.get('reason') in [MISSED_VISIT, LOST_VISIT]:
raise forms.ValidationError(
{'reason':
'You indicated that the reason for the visit report is '
'{reason} but also that the {participant} is present. '
'Please correct.'.format(
participant=self.participant_label,
reason=cleaned_data.get('reason'))})
elif cleaned_data.get('is_present') == NO:
if cleaned_data.get('info_source') == PARTICIPANT:
raise forms.ValidationError(
{'info_source': 'Source of information cannot be from '
'{participant} if {participant} is not present.'.format(
participant=self.participant_label)})
def validate_required_fields(self):
self.required_if(
MISSED_VISIT,
field='reason',
field_required='reason_missed')
self.required_if(
UNSCHEDULED,
field='reason',
field_required='reason_unscheduled')
self.required_if(
OTHER,
field='info_source',
field_required='info_source_other')
self.required_if(
OTHER,
field='reason_unscheduled',
field_required='reason_unscheduled_other')
| gpl-2.0 | 4,630,544,670,171,520,000 | 38.438017 | 81 | 0.582775 | false | 4.455649 | false | false | false |
pyload/pyload | src/pyload/plugins/downloaders/ZDF.py | 1 | 2269 | # -*- coding: utf-8 -*-
import re
import json
import os
from pyload.core.network.request_factory import get_url
import xml.etree.ElementTree as etree
import pycurl
from ..base.downloader import BaseDownloader
# Based on zdfm by Roland Beermann (http://github.com/enkore/zdfm/)
class ZDF(BaseDownloader):
__name__ = "ZDF Mediathek"
__type__ = "downloader"
__version__ = "0.92"
__status__ = "testing"
__pattern__ = r"https://(?:www\.)?zdf\.de/(?P<ID>[/\w-]+)\.html"
__config__ = [
("enabled", "bool", "Activated", True),
("use_premium", "bool", "Use premium account if available", True),
("fallback", "bool", "Fallback to free download if premium fails", True),
("chk_filesize", "bool", "Check file size", True),
("max_wait", "int", "Reconnect if waiting time is greater than minutes", 10),
]
__description__ = """ZDF.de downloader plugin"""
__license__ = "GPLv3"
__authors__ = []
def process(self, pyfile):
self.data = self.load(pyfile.url)
try:
api_token = re.search(
r'window\.zdfsite\.player\.apiToken = "([\d\w]+)";', self.data
).group(1)
self.req.http.c.setopt(pycurl.HTTPHEADER, ["Api-Auth: Bearer " + api_token])
id = re.match(self.__pattern__, pyfile.url).group("ID")
filename = json.loads(
self.load(
"https://api.zdf.de/content/documents/zdf/" + id + ".json",
get={"profile": "player-3"},
)
)
stream_list = filename["mainVideoContent"]["http://zdf.de/rels/target"][
"streams"
]["default"]["extId"]
streams = json.loads(
self.load(
"https://api.zdf.de/tmd/2/ngplayer_2_4/vod/ptmd/mediathek/"
+ stream_list
)
)
download_name = streams["priorityList"][0]["formitaeten"][0]["qualities"][
0
]["audio"]["tracks"][0]["uri"]
self.pyfile.name = os.path.basename(id) + os.path.splitext(download_name)[1]
self.download(download_name)
except Exception as exc:
self.log_error(exc)
| agpl-3.0 | -6,561,568,899,936,143,000 | 32.865672 | 88 | 0.527545 | false | 3.59588 | false | false | false |
seleniumbase/SeleniumBase | seleniumbase/core/download_helper.py | 1 | 2057 | import os
import shutil
import time
from seleniumbase.config import settings
from seleniumbase.fixtures import constants
# The "downloads_folder" is a folder for saving downloaded files.
# Works for downloads initiated by Chromium and Firefox WebDriver clicks.
# Browser type doesn't matter if using self.download_file(file_url)
# or self.save_file_as(file_url, new_file_name)
# The "downloads_folder" is cleaned out at the start of each pytest run,
# but there is an option to save existing files in "archived_files".
DOWNLOADS_DIR = constants.Files.DOWNLOADS_FOLDER
ARCHIVE_DIR = constants.Files.ARCHIVED_DOWNLOADS_FOLDER
abs_path = os.path.abspath(".")
downloads_path = os.path.join(abs_path, DOWNLOADS_DIR)
def get_downloads_folder():
return downloads_path
def reset_downloads_folder():
"""Clears the downloads folder.
If settings.ARCHIVE_EXISTING_DOWNLOADS is set to True, archives it."""
if os.path.exists(downloads_path) and not os.listdir(downloads_path) == []:
archived_downloads_folder = os.path.join(
downloads_path, "..", ARCHIVE_DIR
)
reset_downloads_folder_assistant(archived_downloads_folder)
def reset_downloads_folder_assistant(archived_downloads_folder):
if not os.path.exists(archived_downloads_folder):
try:
os.makedirs(archived_downloads_folder)
except Exception:
pass # Should only be reachable during multi-threaded test runs
new_archived_downloads_sub_folder = "%s/downloads_%s" % (
archived_downloads_folder,
int(time.time()),
)
if os.path.exists(downloads_path):
if not os.listdir(downloads_path) == []:
try:
shutil.move(downloads_path, new_archived_downloads_sub_folder)
os.makedirs(downloads_path)
except Exception:
pass
if not settings.ARCHIVE_EXISTING_DOWNLOADS:
try:
shutil.rmtree(new_archived_downloads_sub_folder)
except OSError:
pass
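# Illustrative usage sketch (an assumption, not part of the original module):
# a test session can clear/archive old downloads once at startup and then
# inspect the folder for files saved by browser-initiated downloads.
#
#     from seleniumbase.core import download_helper
#     download_helper.reset_downloads_folder()
#     folder = download_helper.get_downloads_folder()
#     # ... trigger downloads during tests, then check os.listdir(folder)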
| mit | -4,152,373,432,248,076,000 | 36.4 | 79 | 0.674283 | false | 3.933078 | false | false | false |
CountZer0/PipelineConstructionSet | python/maya/site-packages/pymel-1.0.5/pymel/core/nodetypes.py | 1 | 123778 | """
Contains classes corresponding to the Maya type hierarchy, including `DependNode`, `Transform`, `Mesh`, and `Camera`.
"""
import sys, os, re
import inspect, itertools, math
import pymel.util as _util
import pymel.internal.pmcmds as cmds #@UnresolvedImport
import pymel.internal.factories as _factories
import pymel.api as _api #@UnresolvedImport
import pymel.internal.apicache as _apicache
import pymel.internal.pwarnings as _warnings
from pymel.internal import getLogger as _getLogger
import datatypes
_logger = _getLogger(__name__)
# to make sure Maya is up
import pymel.internal as internal
import pymel.versions as versions
from maya.cmds import about as _about
import maya.mel as mm
#from general import *
import general
import other
from animation import listAnimatable as _listAnimatable
from system import namespaceInfo as _namespaceInfo, FileReference as _FileReference
_thisModule = sys.modules[__name__]
#__all__ = ['Component', 'MeshEdge', 'MeshVertex', 'MeshFace', 'Attribute', 'DependNode' ]
## Mesh Components
# If we're reloading, clear the pynode types out
_factories.clearPyNodeTypes()
class DependNode( general.PyNode ):
__apicls__ = _api.MFnDependencyNode
__metaclass__ = _factories.MetaMayaNodeWrapper
#-------------------------------
# Name Info and Manipulation
#-------------------------------
# def __new__(cls,name,create=False):
# """
# Provides the ability to create the object when creating a class
#
# >>> n = pm.Transform("persp",create=True)
# >>> n.__repr__()
# # Result: nt.Transform(u'persp1')
# """
# if create:
# ntype = cls.__melnode__
# name = createNode(ntype,n=name,ss=1)
# return general.PyNode.__new__(cls,name)
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self._apiobject.object() )
@_util.universalmethod
def __melobject__(self):
"""Special method for returning a mel-friendly representation."""
if isinstance(self, DependNode):
# For instance, return the node's name...
return self.name()
else:
# For the class itself, return the mel node name
return self.__melnode__
def __repr__(self):
"""
:rtype: `unicode`
"""
return u"nt.%s(%r)" % (self.__class__.__name__, self.name())
def _updateName(self) :
# test validity
self.__apimobject__()
self._name = self.__apimfn__().name()
return self._name
def name(self, update=True, stripNamespace=False) :
"""
:rtype: `unicode`
"""
if update or self._name is None:
try:
self._updateName()
except general.MayaObjectError:
_logger.warn( "object %s no longer exists" % self._name )
name = self._name
if stripNamespace:
name = name.rsplit(':', 1)[-1]
return name
def shortName(self):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name()
def longName(self):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name()
def nodeName(self, **kwargs):
"""
This produces the same results as `DependNode.name` and is included to simplify looping over lists
of nodes that include both Dag and Depend nodes.
:rtype: `unicode`
"""
return self.name(**kwargs)
#rename = rename
def rename( self, name, **kwargs ):
"""
:rtype: `DependNode`
"""
#self.setName( name ) # no undo support
#check for preserveNamespace a pymel unique flag
if kwargs.pop('preserveNamespace', False):
name = self.namespace(root=True) + name
#ensure shortname
if '|' in name:
name = name.split('|')[-1]
return general.rename(self, name, **kwargs)
def __apiobject__(self) :
"get the default API object (MObject) for this node if it is valid"
return self.__apimobject__()
def __apimobject__(self) :
"get the MObject for this node if it is valid"
handle = self.__apihandle__()
if _api.isValidMObjectHandle( handle ) :
return handle.object()
raise general.MayaNodeError( self._name )
def __apihandle__(self) :
return self.__apiobjects__['MObjectHandle']
def __str__(self):
return "%s" % self.name()
def __unicode__(self):
return u"%s" % self.name()
if versions.current() >= versions.v2009:
def __hash__(self):
return self.__apihandle__().hashCode()
def node(self):
"""for compatibility with Attribute class
:rtype: `DependNode`
"""
return self
#--------------------------
# Modification
#--------------------------
def lock( self, **kwargs ):
'lockNode -lock 1'
#kwargs['lock'] = True
#kwargs.pop('l',None)
#return cmds.lockNode( self, **kwargs)
return self.setLocked( True )
def unlock( self, **kwargs ):
'lockNode -lock 0'
#kwargs['lock'] = False
#kwargs.pop('l',None)
#return cmds.lockNode( self, **kwargs)
return self.setLocked( False )
def cast( self, swapNode, **kwargs):
"""nodeCast"""
return cmds.nodeCast( self, swapNode, *kwargs )
duplicate = general.duplicate
#--------------------------
#xxx{ Presets
#--------------------------
def savePreset(self, presetName, custom=None, attributes=[]):
kwargs = {'save':True}
if attributes:
kwargs['attributes'] = ' '.join(attributes)
if custom:
kwargs['custom'] = custom
return cmds.nodePreset( presetName, **kwargs)
def loadPreset(self, presetName):
kwargs = {'load':True}
return cmds.nodePreset( presetName, **kwargs)
def deletePreset(self, presetName):
kwargs = {'delete':True}
return cmds.nodePreset( presetName, **kwargs)
def listPresets(self):
kwargs = {'list':True}
return cmds.nodePreset( **kwargs)
#}
#--------------------------
#xxx{ Info
#--------------------------
type = general.nodeType
def referenceFile(self):
"""referenceQuery -file
Return the reference file to which this object belongs. None if object is not referenced
:rtype: `FileReference`
"""
try:
return _FileReference( cmds.referenceQuery( self, f=1) )
except RuntimeError:
            return None
isReadOnly = _factories.wrapApiMethod( _api.MFnDependencyNode, 'isFromReferencedFile', 'isReadOnly' )
def classification(self):
'getClassification'
return general.getClassification( self.type() )
#return self.__apimfn__().classification( self.type() )
#}
#--------------------------
#xxx{ Connections
#--------------------------
def inputs(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None )
kwargs['destination'] = False
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def outputs(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None )
kwargs['destination'] = True
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def sources(self, **kwargs):
"""listConnections -source 1 -destination 0
:rtype: `PyNode` list
"""
kwargs['source'] = True
kwargs.pop('s', None )
kwargs['destination'] = False
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def destinations(self, **kwargs):
"""listConnections -source 0 -destination 1
:rtype: `PyNode` list
"""
kwargs['source'] = False
kwargs.pop('s', None )
kwargs['destination'] = True
kwargs.pop('d', None )
return general.listConnections(self, **kwargs)
def shadingGroups(self):
"""list any shading groups in the future of this object - works for
shading nodes, transforms, and shapes
Also see listSets(type=1) - which returns which 'rendering sets' the
object is a member of (and 'rendering sets' seem to consist only of
shading groups), whereas this method searches the object's future for
any nodes of type 'shadingEngine'.
:rtype: `DependNode` list
"""
return self.future(type='shadingEngine')
#}
#--------------------------
#xxx{ Attributes
#--------------------------
def __getattr__(self, attr):
try :
return getattr(super(general.PyNode, self), attr)
except AttributeError :
try:
return DependNode.attr(self,attr)
except general.MayaAttributeError, e:
# since we're being called via __getattr__ we don't know whether the user was intending
# to get a class method or a maya attribute, so we raise a more generic AttributeError
raise AttributeError,"%r has no attribute or method named '%s'" % (self, attr)
@_util.universalmethod
def attrDefaults(obj, attr): #@NoSelf
"""
Access to an attribute of a node. This does not require an instance:
>>> nt.Transform.attrDefaults('tx').isKeyable()
True
but it can use one if needed ( for example, for dynamically created attributes )
>>> nt.Transform(u'persp').attrDefaults('tx').isKeyable()
Note: this is still experimental.
"""
if inspect.isclass(obj):
self = None
cls = obj # keep things familiar
else:
self = obj # keep things familiar
cls = type(obj)
attributes = cls.__apiobjects__.setdefault('MFnAttributes', {})
attrObj = attributes.get(attr, None)
if not _api.isValidMObject(attrObj):
def toAttrObj(apiObj):
try:
attrObj = apiObj.attribute(attr)
if attrObj.isNull():
raise RuntimeError
except RuntimeError:
# just try it first, then check if it has the attribute if
# we errored (as opposed to always check first if the node
# has the attribute), on the assumption that this will be
# "faster" for most cases, where the node actually DOES have
# the attribute...
if not apiObj.hasAttribute(attr):
raise general.MayaAttributeError('%s.%s' % (cls.__melnode__, attr))
else:
# don't know why we got this error, so just reraise
raise
return attrObj
if self is None:
if hasattr(_api, 'MNodeClass'):
# Yay, we have MNodeClass, use it!
nodeCls = _api.MNodeClass(cls.__melnode__)
attrObj = toAttrObj(nodeCls)
else:
# We don't have an instance of the node, we need
# to make a ghost one...
with _apicache._GhostObjMaker(cls.__melnode__) as nodeObj:
if nodeObj is None:
# for instance, we get this if we have an abstract class...
raise RuntimeError("Unable to get attribute defaults for abstract node class %s, in versions prior to 2012" % cls.__melnode__)
nodeMfn = cls.__apicls__(nodeObj)
attrObj = toAttrObj(nodeMfn)
else:
nodeMfn = self.__apimfn__()
attrObj = toAttrObj(nodeMfn)
attributes[attr] = attrObj
return general.AttributeDefaults( attrObj )
def attr(self, attr):
"""
access to attribute plug of a node. returns an instance of the Attribute class for the
given attribute name.
:rtype: `Attribute`
"""
return self._attr(attr, False)
# Just have this alias because it will sometimes return attributes for an
# underlying shape, which we may want for DagNode.attr, but don't want for
# DependNode.attr (and using the on-shape result, instead of throwing it
# away and then finding it again on the shape, saves time for the DagNode
# case)
def _attr(self, attr, allowOtherNode):
#return Attribute( '%s.%s' % (self, attr) )
try :
if '.' in attr or '[' in attr:
# Compound or Multi Attribute
# there are a couple of different ways we can proceed:
# Option 1: back out to _api.toApiObject (via general.PyNode)
# return Attribute( self.__apiobject__(), self.name() + '.' + attr )
# Option 2: nameparse.
# this avoids calling self.name(), which can be slow
import pymel.util.nameparse as nameparse
nameTokens = nameparse.getBasicPartList( 'dummy.' + attr )
result = self.__apiobject__()
for token in nameTokens[1:]: # skip the first, bc it's the node, which we already have
if isinstance( token, nameparse.MayaName ):
if isinstance( result, _api.MPlug ):
# you can't get a child plug from a multi/array plug.
# if result is currently 'defaultLightList1.lightDataArray' (an array)
# and we're trying to get the next plug, 'lightDirection', then we need a dummy index.
                            # the following line will result in 'defaultLightList1.lightDataArray[-1].lightDirection'
if result.isArray():
result = self.__apimfn__().findPlug( unicode(token) )
else:
result = result.child( self.__apimfn__().attribute( unicode(token) ) )
else: # Node
result = self.__apimfn__().findPlug( unicode(token) )
# # search children for the attribute to simulate cam.focalLength --> perspShape.focalLength
# except TypeError:
# for i in range(fn.childCount()):
# try: result = _api.MFnDagNode( fn.child(i) ).findPlug( unicode(token) )
# except TypeError: pass
# else:break
if isinstance( token, nameparse.NameIndex ):
if token.value != -1:
result = result.elementByLogicalIndex( token.value )
plug = result
else:
try:
plug = self.__apimfn__().findPlug( attr, False )
except RuntimeError:
# Don't use .findAlias, as it always returns the 'base'
# attribute - ie, if the alias is to foo[0].bar, it will
# just point to foo
# aliases
#obj = _api.MObject()
#self.__apimfn__().findAlias( attr, obj )
#plug = self.__apimfn__().findPlug( obj, False )
# the following technique gets aliased attributes as well. turning dagPlugs to off saves time because we already
# know the dagNode. however, certain attributes, such as rotatePivot, are detected as components,
# despite the fact that findPlug finds them as MPlugs. need to look into this
# TODO: test speed versus above method
try:
plug = _api.toApiObject(self.name() + '.' + attr, dagPlugs=False)
except RuntimeError:
raise
if not isinstance(plug, _api.MPlug):
raise RuntimeError
if not (allowOtherNode or plug.node() == self.__apimobject__()):
# we could have gotten an attribute on a shape object,
# which we don't want
raise RuntimeError
return general.Attribute( self.__apiobject__(), plug )
except RuntimeError:
# raise our own MayaAttributeError, which subclasses AttributeError and MayaObjectError
raise general.MayaAttributeError( '%s.%s' % (self, attr) )
hasAttr = general.hasAttr
@_factories.addMelDocs('setAttr')
def setAttr( self, attr, *args, **kwargs):
# for now, using strings is better, because there is no MPlug support
return general.setAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('setAttr')
def setDynamicAttr( self, attr, *args, **kwargs):
"""
same as `DependNode.setAttr` with the force flag set to True. This causes
the attribute to be created based on the passed input value.
"""
# for now, using strings is better, because there is no MPlug support
kwargs['force'] = True
return general.setAttr( "%s.%s" % (self, attr), *args, **kwargs )
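    # Illustrative sketch (hypothetical attribute name, not from the original
    # docs): because the force flag is set, the attribute is created on the fly
    # if it does not already exist, with its type inferred from the value, e.g.
    #
    #     node.setDynamicAttr('notes', 'built by the rig tool')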
@_factories.addMelDocs('getAttr')
def getAttr( self, attr, *args, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.getAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('addAttr')
def addAttr( self, attr, **kwargs):
# for now, using strings is better, because there is no MPlug support
assert 'longName' not in kwargs and 'ln' not in kwargs
kwargs['longName'] = attr
return general.addAttr( unicode(self), **kwargs )
@_factories.addMelDocs('deleteAttr')
def deleteAttr( self, attr, *args, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.deleteAttr( "%s.%s" % (self, attr), *args, **kwargs )
@_factories.addMelDocs('connectAttr')
def connectAttr( self, attr, destination, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.connectAttr( "%s.%s" % (self, attr), destination, **kwargs )
@_factories.addMelDocs('disconnectAttr')
def disconnectAttr( self, attr, destination=None, **kwargs ):
# for now, using strings is better, because there is no MPlug support
return general.disconnectAttr( "%s.%s" % (self, attr), destination, **kwargs )
listAnimatable = _listAnimatable
def listAttr( self, **kwargs):
"""
listAttr
Modifications:
- returns an empty list when the result is None
- added 'alias' keyword to list attributes that have aliases
:rtype: `Attribute` list
"""
alias = kwargs.pop('alias', False)
# stringify fix
res = map( lambda x: self.attr(x), _util.listForNone(cmds.listAttr(self.name(), **kwargs)))
if alias:
res = [ x[1] for x in self.listAliases() if x[1] in res]
# aliases = dict( (x[1], x[0]) for x in general.aliasAttr(self.name()) )
# tmp = res
# res = []
# for at in tmp:
# try:
# res.append( aliases[at], at )
# except KeyError:
# pass
return res
def listAliases( self ):
"""
aliasAttr
Modifications:
- returns an empty list when the result is None
- when queried, returns a list of (alias, `Attribute`) pairs.
:rtype: (`str`, `Attribute`) list
"""
#tmp = _util.listForNone(cmds.aliasAttr(self.name(),query=True))
tmp = []
self.__apimfn__().getAliasList(tmp)
res = []
for i in range(0,len(tmp),2):
res.append((tmp[i], general.Attribute(self.node() + '.' + tmp[i+1])))
return res
def attrInfo( self, **kwargs):
"""attributeInfo
:rtype: `Attribute` list
"""
# stringify fix
return map( lambda x: self.attr(x) , _util.listForNone(cmds.attributeInfo(self.name(), **kwargs)))
#}
#-----------------------------------------
#xxx{ Name Info and Manipulation
#-----------------------------------------
# Now just wraps NameParser functions
def stripNum(self):
"""Return the name of the node with trailing numbers stripped off. If no trailing numbers are found
the name will be returned unchanged.
>>> from pymel.core import *
>>> SCENE.lambert1.stripNum()
u'lambert'
:rtype: `unicode`
"""
return other.NameParser(self).stripNum()
def extractNum(self):
"""Return the trailing numbers of the node name. If no trailing numbers are found
an error will be raised.
>>> from pymel.core import *
>>> SCENE.lambert1.extractNum()
u'1'
:rtype: `unicode`
"""
return other.NameParser(self).extractNum()
def nextUniqueName(self):
"""Increment the trailing number of the object until a unique name is found
If there is no trailing number, appends '1' to the name.
:rtype: `unicode`
"""
return other.NameParser(self).nextUniqueName()
def nextName(self):
"""Increment the trailing number of the object by 1
Raises an error if the name has no trailing number.
>>> from pymel.core import *
>>> SCENE.lambert1.nextName()
DependNodeName(u'lambert2')
:rtype: `unicode`
"""
return other.NameParser(self).nextName()
def prevName(self):
"""Decrement the trailing number of the object by 1
Raises an error if the name has no trailing number.
:rtype: `unicode`
"""
return other.NameParser(self).prevName()
@classmethod
def registerVirtualSubClass( cls, nameRequired=False ):
"""
Deprecated
"""
_factories.registerVirtualClass(cls, nameRequired)
#}
if versions.current() >= versions.v2011:
class ContainerBase(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class Entity(ContainerBase):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
else:
class Entity(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
pass
class DagNode(Entity):
#:group Path Info and Modification: ``*parent*``, ``*Parent*``, ``*child*``, ``*Child*``
"""
"""
__apicls__ = _api.MFnDagNode
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# self.apicls.__init__(self, self.__apimdagpath__() )
_componentAttributes = {}
def comp(self, compName):
"""
Will retrieve a Component object for this node; similar to
DependNode.attr(), but for components.
:rtype: `Component`
"""
if compName in self._componentAttributes:
compClass = self._componentAttributes[compName]
if isinstance(compClass, tuple):
# We have something like:
# 'uIsoparm' : (NurbsSurfaceIsoparm, 'u')
# need to specify what 'flavor' of the basic
# component we need...
return compClass[0](self, {compClass[1]:general.ComponentIndex(label=compClass[1])})
else:
return compClass(self)
# if we do self.getShape(), and this is a shape node, we will
# enter a recursive loop if compName isn't actually a comp:
# since shape doesn't have 'getShape', it will call __getattr__
        # for 'getShape', which in turn calls comp to check if it's a comp,
# which will call __getattr__, etc
# ..soo... check if we have a 'getShape'!
# ...also, don't use 'hasattr', as this will also call __getattr__!
try:
object.__getattribute__(self, 'getShape')
except AttributeError:
raise general.MayaComponentError( '%s.%s' % (self, compName) )
else:
shape = self.getShape()
if shape:
return shape.comp(compName)
def listComp(self, names=False):
"""Will return a list of all component objects for this object
Is to .comp() what .listAttr() is to .attr(); will NOT check the shape
node.
Parameters
----------
names : bool
            By default, will return a list of actual usable pymel Component
objects; if you just want a list of string names which would
be compatible with .comp(), set names to True
"""
keys = sorted(self._componentAttributes.keys())
if names:
return keys
compTypes = set()
comps = []
# use the sorted keys, so the order matches that returned by names,
# minus duplicate entries for aliases
for name in keys:
compType = self._componentAttributes[name]
if compType not in compTypes:
compTypes.add(compType)
comps.append(self.comp(name))
return comps
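    # Illustrative sketch (assumed scene node name, not part of the original
    # module): comp() and listComp() are to components what attr() and
    # listAttr() are to attributes, e.g. for a NURBS surface shape:
    #
    #     surf = general.PyNode('nurbsSphereShape1')   # hypothetical node
    #     surf.comp('uIsoparm')         # a NurbsSurfaceIsoparm component
    #     surf.listComp(names=True)     # sorted list of component names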
def _updateName(self, long=False) :
#if _api.isValidMObjectHandle(self._apiobject) :
#obj = self._apiobject.object()
#dagFn = _api.MFnDagNode(obj)
#dagPath = _api.MDagPath()
#dagFn.getPath(dagPath)
dag = self.__apimdagpath__()
if dag:
name = dag.partialPathName()
if not name:
raise general.MayaNodeError
self._name = name
if long :
return dag.fullPathName()
return self._name
def name(self, update=True, long=False) :
if update or long or self._name is None:
try:
return self._updateName(long)
except general.MayaObjectError:
_logger.warn( "object %s no longer exists" % self._name )
return self._name
def longName(self,stripNamespace=False,levels=0):
"""
The full dag path to the object, including leading pipe ( | )
:rtype: `unicode`
"""
if stripNamespace:
name = self.name(long=True)
nodes = []
for x in name.split('|'):
y = x.split('.')
z = y[0].split(':')
if levels:
y[0] = ':'.join( z[min(len(z)-1,levels):] )
else:
y[0] = z[-1]
nodes.append( '.'.join( y ) )
stripped_name = '|'.join( nodes)
return stripped_name
return self.name(long=True)
fullPath = longName
def shortName( self ):
"""
The shortest unique name.
:rtype: `unicode`
"""
return self.name(long=False)
def nodeName( self, stripNamespace=False ):
"""
Just the name of the node, without any dag path
:rtype: `unicode`
"""
name = self.name().rsplit('|', 1)[-1]
if stripNamespace:
name = name.rsplit(':', 1)[-1]
return name
def __apiobject__(self) :
"get the MDagPath for this object if it is valid"
return self.__apimdagpath__()
def __apimdagpath__(self) :
"get the MDagPath for this object if it is valid"
try:
dag = self.__apiobjects__['MDagPath']
# test for validity: if the object is not valid an error will be raised
self.__apimobject__()
return dag
except KeyError:
# class was instantiated from an MObject, but we can still retrieve the first MDagPath
#assert argObj.hasFn( _api.MFn.kDagNode )
dag = _api.MDagPath()
            # we can't use self.__apimfn__() because the mfn is instantiated from an MDagPath
# which we are in the process of finding out
mfn = _api.MFnDagNode( self.__apimobject__() )
mfn.getPath(dag)
self.__apiobjects__['MDagPath'] = dag
return dag
# if dag.isValid():
# #argObj = dag
# if dag.fullPathName():
# argObj = dag
# else:
# print 'produced valid MDagPath with no name: %s(%s)' % ( argObj.apiTypeStr(), _api.MFnDependencyNode(argObj).name() )
def __apihandle__(self) :
try:
handle = self.__apiobjects__['MObjectHandle']
except KeyError:
try:
handle = _api.MObjectHandle( self.__apiobjects__['MDagPath'].node() )
except RuntimeError:
raise general.MayaNodeError( self._name )
self.__apiobjects__['MObjectHandle'] = handle
return handle
# def __apimfn__(self):
# if self._apimfn:
# return self._apimfn
# elif self.__apicls__:
# obj = self._apiobject
# if _api.isValidMDagPath(obj):
# try:
# self._apimfn = self.__apicls__(obj)
# return self._apimfn
# except KeyError:
# pass
# def __init__(self, *args, **kwargs):
# if self._apiobject:
# if isinstance(self._apiobject, _api.MObjectHandle):
# dagPath = _api.MDagPath()
# _api.MDagPath.getAPathTo( self._apiobject.object(), dagPath )
# self._apiobject = dagPath
#
# assert _api.isValidMDagPath( self._apiobject )
# def __init__(self, *args, **kwargs) :
# if args :
# arg = args[0]
# if len(args) > 1 :
# comp = args[1]
# if isinstance(arg, DagNode) :
# self._name = unicode(arg.name())
# self._apiobject = _api.MObjectHandle(arg.object())
# elif _api.isValidMObject(arg) or _api.isValidMObjectHandle(arg) :
# objHandle = _api.MObjectHandle(arg)
# obj = objHandle.object()
# if _api.isValidMDagNode(obj) :
# self._apiobject = objHandle
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# elif isinstance(arg, basestring) :
# obj = _api.toMObject (arg)
# if obj :
# # creation for existing object
# if _api.isValidMDagNode (obj):
# self._apiobject = _api.MObjectHandle(obj)
# self._updateName()
# else :
# raise TypeError, "%r might be a dependencyNode, but not a dagNode" % arg
# else :
# # creation for inexistent object
# self._name = arg
# else :
# raise TypeError, "don't know how to make a DagNode out of a %s : %r" % (type(arg), arg)
#--------------------------------
#xxx{ Path Info and Modification
#--------------------------------
def root(self):
"""rootOf
:rtype: `unicode`
"""
return DagNode( '|' + self.longName()[1:].split('|')[0] )
# def hasParent(self, parent ):
# try:
# return self.__apimfn__().hasParent( parent.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(parent)
# if obj:
# return self.__apimfn__().hasParent( obj )
#
# def hasChild(self, child ):
# try:
# return self.__apimfn__().hasChild( child.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(child)
# if obj:
# return self.__apimfn__().hasChild( obj )
#
# def isParentOf( self, parent ):
# try:
# return self.__apimfn__().isParentOf( parent.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(parent)
# if obj:
# return self.__apimfn__().isParentOf( obj )
#
# def isChildOf( self, child ):
# try:
# return self.__apimfn__().isChildOf( child.__apiobject__() )
# except AttributeError:
# obj = _api.toMObject(child)
# if obj:
# return self.__apimfn__().isChildOf( obj )
def isInstanceOf(self, other):
"""
:rtype: `bool`
"""
if isinstance( other, general.PyNode ):
return self.__apimobject__() == other.__apimobject__()
else:
try:
return self.__apimobject__() == general.PyNode(other).__apimobject__()
except:
return False
def instanceNumber(self):
"""
returns the instance number that this path represents in the DAG. The instance number can be used to determine which
element of the world space array attributes of a DAG node to connect to get information regarding this instance.
:rtype: `int`
"""
return self.__apimdagpath__().instanceNumber()
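    # Illustrative sketch (not executed): the instance number indexes the world-space
    # array attributes of a DAG node, e.g. when reading an instanced shape's world
    # matrix ('worldMatrix' is a real attribute; the shape variable is hypothetical):
    #   idx = shape.instanceNumber()
    #   wm  = shape.attr('worldMatrix')[idx].get()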
def getInstances(self, includeSelf=True):
"""
:rtype: `DagNode` list
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = polyPlane()[0]
>>> instance(s)
[nt.Transform(u'pPlane2')]
>>> instance(s)
[nt.Transform(u'pPlane3')]
>>> s.getShape().getInstances()
[nt.Mesh(u'pPlane1|pPlaneShape1'), nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
>>> s.getShape().getInstances(includeSelf=False)
[nt.Mesh(u'pPlane2|pPlaneShape1'), nt.Mesh(u'pPlane3|pPlaneShape1')]
"""
d = _api.MDagPathArray()
self.__apimfn__().getAllPaths(d)
thisDagPath = self.__apimdagpath__()
result = [ general.PyNode( _api.MDagPath(d[i])) for i in range(d.length()) if includeSelf or not d[i] == thisDagPath ]
return result
def getOtherInstances(self):
"""
same as `DagNode.getInstances` with includeSelf=False.
:rtype: `DagNode` list
"""
return self.getInstances(includeSelf=False)
def firstParent(self):
"""firstParentOf
:rtype: `DagNode`
"""
try:
return DagNode( '|'.join( self.longName().split('|')[:-1] ) )
except TypeError:
return DagNode( '|'.join( self.split('|')[:-1] ) )
# def numChildren(self):
# """
# see also `childCount`
#
# :rtype: `int`
# """
# return self.__apimdagpath__().childCount()
# def getParent(self, **kwargs):
# # TODO : print warning regarding removal of kwargs, test speed difference
# parent = _api.MDagPath( self.__apiobject__() )
# try:
# parent.pop()
# return general.PyNode(parent)
# except RuntimeError:
# pass
#
# def getChildren(self, **kwargs):
# # TODO : print warning regarding removal of kwargs
# children = []
# thisDag = self.__apiobject__()
# for i in range( thisDag.childCount() ):
# child = _api.MDagPath( thisDag )
# child.push( thisDag.child(i) )
# children.append( general.PyNode(child) )
# return children
def firstParent2(self, **kwargs):
"""unlike the firstParent command which determines the parent via string formatting, this
command uses the listRelatives command
"""
kwargs['parent'] = True
kwargs.pop('p',None)
#if longNames:
kwargs['fullPath'] = True
kwargs.pop('f',None)
try:
res = cmds.listRelatives( self, **kwargs)[0]
except TypeError:
return None
res = general.PyNode( res )
return res
@staticmethod
def _getDagParent(dag):
if dag.length() <= 1:
return None
# Need a copy as we'll be modifying it...
dag = _api.MDagPath(dag)
dag.pop()
return dag
def getParent(self, generations=1):
"""
Modifications:
- added optional generations flag, which gives the number of levels up that you wish to go for the parent;
ie:
>>> from pymel.core import *
>>> select(cl=1)
>>> bottom = group(n='bottom')
>>> group(n='almostThere')
nt.Transform(u'almostThere')
>>> group(n='nextLevel')
nt.Transform(u'nextLevel')
>>> group(n='topLevel')
nt.Transform(u'topLevel')
>>> bottom.longName()
u'|topLevel|nextLevel|almostThere|bottom'
>>> bottom.getParent(2)
nt.Transform(u'nextLevel')
Negative values will traverse from the top:
>>> bottom.getParent(generations=-3)
nt.Transform(u'almostThere')
A value of 0 will return the same node.
The default value is 1.
If generations is None, it will be interpreted as 'return all
parents', and a list will be returned.
Since the original command returned None if there is no parent, to sync with this behavior, None will
be returned if generations is out of bounds (no IndexError will be thrown).
:rtype: `DagNode`
"""
# Get the parent through the api - listRelatives doesn't handle instances correctly,
# and string processing seems unreliable...
res = general._getParent(self._getDagParent, self.__apimdagpath__(), generations)
if generations is None:
if res is None:
return []
return [general.PyNode(x) for x in res]
elif res is not None:
return general.PyNode( res )
def getAllParents(self):
"""
Return a list of all parents above this.
Starts from the parent immediately above, going up.
:rtype: `DagNode` list
"""
return self.getParent(generations=None)
def getChildren(self, **kwargs ):
"""
see also `childAtIndex`
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
kwargs['children'] = True
kwargs.pop('c',None)
return general.listRelatives( self, **kwargs)
def getSiblings(self, **kwargs ):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `DagNode` list
"""
#pass
try:
return [ x for x in self.getParent().getChildren(**kwargs) if x != self]
except:
return []
def listRelatives(self, **kwargs ):
"""
for flags, see pymel.core.general.listRelatives
:rtype: `PyNode` list
"""
return general.listRelatives( self, **kwargs)
def setParent( self, *args, **kwargs ):
"""
parent
Modifications:
- if parent is 'None', world=True is automatically set
- if the given parent is the current parent, don't error
"""
result = general.parent(self, *args, **kwargs)
if result:
result = result[0]
return result
def addChild( self, child, **kwargs ):
"""parent (reversed)
:rtype: `DagNode`
"""
cmds.parent( child, self, **kwargs )
if not isinstance( child, general.PyNode ):
child = general.PyNode(child)
return child
def __or__(self, child, **kwargs):
"""
operator for `addChild`. Use to easily daisy-chain together parenting operations.
The operation order visually mimics the resulting dag path:
>>> from pymel.core import *
>>> s = polySphere(name='sphere')[0]
>>> c = polyCube(name='cube')[0]
>>> t = polyTorus(name='torus')[0]
>>> s | c | t
nt.Transform(u'torus')
>>> print t.fullPath()
|sphere|cube|torus
:rtype: `DagNode`
"""
return self.addChild(child,**kwargs)
#}
#instance = instance
#--------------------------
# Shading
#--------------------------
def isDisplaced(self):
"""Returns whether any of this object's shading groups have a displacement shader input
:rtype: `bool`
"""
for sg in self.shadingGroups():
if len( sg.attr('displacementShader').inputs() ):
return True
return False
def hide(self):
self.visibility.set(0)
def show(self):
self.visibility.set(1)
def isVisible(self, checkOverride=True):
if not self.attr('visibility').get():
return False
if (checkOverride and self.attr('overrideEnabled').get()
and not self.attr('overrideVisibility').get()):
return False
parent = self.getParent()
if not parent:
return True
else:
return parent.isVisible(checkOverride=checkOverride)
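    # Illustrative sketch (not executed): isVisible() walks up the DAG, so hiding a
    # parent hides its descendants (group/polyCube are real pymel commands, hide/isVisible
    # are the methods defined above):
    #   grp = group(empty=True)
    #   cube = polyCube()[0]
    #   cube.setParent(grp)
    #   grp.hide()
    #   cube.isVisible()    # False - the parent group is hidden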
def setObjectColor( self, color=None ):
"""This command sets the dormant wireframe color of the specified objects to an integer
representing one of the user defined colors, or, if set to None, to the default class color"""
kwargs = {}
if color:
kwargs['userDefined'] = color
cmds.color(self, **kwargs)
def makeLive( self, state=True ):
if not state:
cmds.makeLive(none=True)
else:
cmds.makeLive(self)
class Shape(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getTransform(self): pass
def setParent(self, *args, **kwargs):
if 'shape' not in kwargs and 's' not in kwargs:
kwargs['s'] = True
        return super(Shape, self).setParent(*args, **kwargs)
#class Joint(Transform):
# pass
class Camera(Shape):
__metaclass__ = _factories.MetaMayaNodeWrapper
def applyBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setCamera'] = True
cmds.cameraView( bookmark, **kwargs )
def addBookmark(self, bookmark=None):
kwargs = {}
kwargs['camera'] = self
kwargs['addBookmark'] = True
if bookmark:
kwargs['name'] = bookmark
cmds.cameraView( **kwargs )
def removeBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['removeBookmark'] = True
kwargs['name'] = bookmark
cmds.cameraView( **kwargs )
def updateBookmark(self, bookmark):
kwargs = {}
kwargs['camera'] = self
kwargs['edit'] = True
kwargs['setView'] = True
cmds.cameraView( bookmark, **kwargs )
def listBookmarks(self):
return self.bookmarks.inputs()
@_factories.addMelDocs('dolly')
def dolly(self, distance, relative=True):
kwargs = {}
kwargs['distance'] = distance
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.dolly(self, **kwargs)
@_factories.addMelDocs('roll')
def roll(self, degree, relative=True):
kwargs = {}
kwargs['degree'] = degree
if relative:
kwargs['relative'] = True
else:
kwargs['absolute'] = True
cmds.roll(self, **kwargs)
#TODO: the functionFactory is causing these methods to have their docs doubled-up, in both pymel.track, and pymel.Camera.track
#dolly = _factories.functionFactory( cmds.dolly )
#roll = _factories.functionFactory( cmds.roll )
orbit = _factories.functionFactory( cmds.orbit )
track = _factories.functionFactory( cmds.track )
tumble = _factories.functionFactory( cmds.tumble )
class Transform(DagNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'rotatePivot' : (general.Pivot, 'rotatePivot'),
'scalePivot' : (general.Pivot, 'scalePivot')}
# def __getattr__(self, attr):
# try :
# return super(general.PyNode, self).__getattr__(attr)
# except AttributeError, msg:
# try:
# return self.getShape().attr(attr)
# except AttributeError:
# pass
#
# # it doesn't exist on the class
# try:
# return self.attr(attr)
# except MayaAttributeError, msg:
# # try the shape
# try: return self.getShape().attr(attr)
# except AttributeError: pass
# # since we're being called via __getattr__ we don't know whether the user was trying
# # to get a class method or a maya attribute, so we raise a more generic AttributeError
# raise AttributeError, msg
def __getattr__(self, attr):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try :
#print "Transform.__getattr__(%r)" % attr
# Functions through normal inheritance
res = DependNode.__getattr__(self,attr)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
for shape in self.getShapes():
try:
return getattr(shape,attr)
except AttributeError: pass
raise e
return res
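    # Illustrative sketch (not executed): attribute access on a transform falls through
    # to its shape, so both of these resolve on a poly sphere ('outMesh' lives on the
    # shape node, not the transform):
    #   sph = polySphere()[0]
    #   sph.translateX    # found on the transform itself
    #   sph.outMesh       # not on the transform; resolved on the sphere's shape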
def __setattr__(self, attr, val):
"""
Checks in the following order:
1. Functions on this node class
2. Attributes on this node class
3. Functions on this node class's shape
4. Attributes on this node class's shape
"""
try :
#print "Transform.__setattr__", attr, val
# Functions through normal inheritance
return DependNode.__setattr__(self,attr,val)
except AttributeError, e:
# Functions via shape inheritance , and then, implicitly, Attributes
#print "Trying shape"
shape = self.getShape()
if shape:
try:
return setattr(shape,attr, val)
except AttributeError: pass
raise e
def attr(self, attr, checkShape=True):
"""
        when checkShape is enabled, if the attribute does not exist on the transform but does on the shape, then the shape's attribute will
be returned.
:rtype: `Attribute`
"""
#print "ATTR: Transform"
try :
res = self._attr(attr, checkShape)
except general.MayaAttributeError, e:
if checkShape:
try:
res = self.getShape().attr(attr)
except AttributeError:
raise e
raise e
return res
# def __getattr__(self, attr):
# if attr.startswith('__') and attr.endswith('__'):
# return super(general.PyNode, self).__getattr__(attr)
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr)
# try:
# if childAttr.exists():
# return childAttr
# except AttributeError:
# return childAttr
# except (AttributeError,TypeError):
# pass
#
# return at
#
# def __setattr__(self, attr,val):
# if attr.startswith('_'):
# attr = attr[1:]
#
# at = Attribute( '%s.%s' % (self, attr) )
#
# # if the attribute does not exist on this node try the shape node
# if not at.exists():
# try:
# childAttr = getattr( self.getShape(), attr )
# try:
# if childAttr.exists():
# return childAttr.set(val)
# except AttributeError:
# return childAttr.set(val)
# except (AttributeError,TypeError):
# pass
#
# return at.set(val)
"""
def move( self, *args, **kwargs ):
return move( self, *args, **kwargs )
def scale( self, *args, **kwargs ):
return scale( self, *args, **kwargs )
def rotate( self, *args, **kwargs ):
return rotate( self, *args, **kwargs )
def align( self, *args, **kwargs):
args = (self,) + args
cmds.align(self, *args, **kwargs)
"""
# NOTE : removed this via proxyClass
# # workaround for conflict with translate method on basestring
# def _getTranslate(self):
# return self.__getattr__("translate")
# def _setTranslate(self, val):
# return self.__setattr__("translate", val)
# translate = property( _getTranslate , _setTranslate )
def getShape( self, **kwargs ):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
try:
return self.getChildren( **kwargs )[0]
except IndexError:
pass
def getShapes( self, **kwargs ):
"""
:rtype: `DagNode`
"""
kwargs['shapes'] = True
return self.getChildren( **kwargs )
def ungroup( self, **kwargs ):
return cmds.ungroup( self, **kwargs )
# @_factories.editflag('xform','scale')
# def setScale( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotation')
# def setRotationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','translation')
# def setTranslationOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','scalePivot')
# def setScalePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','rotatePivot')
# def setRotatePivotOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','pivots')
# def setPivots( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
# @_factories.editflag('xform','rotateAxis')
# def setRotateAxisOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
#
# @_factories.editflag('xform','shear')
# def setShearingOld( self, val, **kwargs ):
# cmds.xform( self, **kwargs )
    @_factories.addMelDocs('xform','matrix')
    def setMatrix( self, val, **kwargs ):
        """xform -matrix"""
kwargs['matrix'] = val
cmds.xform( self, **kwargs )
# @_factories.queryflag('xform','scale')
# def getScaleOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
def _getSpaceArg(self, space, kwargs):
"for internal use only"
if kwargs.pop( 'worldSpace', kwargs.pop('ws', False) ):
space = 'world'
elif kwargs.pop( 'objectSpace', kwargs.pop('os', False) ):
space = 'object'
return space
def _isRelativeArg(self, kwargs ):
isRelative = kwargs.pop( 'relative', kwargs.pop('r', None) )
if isRelative is None:
isRelative = not kwargs.pop( 'absolute', kwargs.pop('a', True) )
return isRelative
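    # Illustrative sketch (not executed): the two helpers above let the api-based
    # setters accept mel-style flags, so the following calls are equivalent
    # (the node is hypothetical; setTranslation is the real method defined below):
    #   node.setTranslation([1, 0, 0], space='world')
    #   node.setTranslation([1, 0, 0], ws=True)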
# @_factories.queryflag('xform','translation')
# def getTranslationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setTranslation' )
def setTranslation(self, vector, space='object', **kwargs):
if self._isRelativeArg(kwargs):
return self.translateBy(vector, space, **kwargs)
space = self._getSpaceArg(space, kwargs )
return self._setTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'getTranslation' )
def getTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getTranslation(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'translateBy' )
def translateBy(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
curr = self._getTranslation(space)
self._translateBy(vector, space)
new = self._getTranslation(space)
undoItem = _factories.ApiUndoItem(Transform.setTranslation, (self, new, space), (self, curr, space) )
_factories.apiUndo.append( undoItem )
@_factories.addApiDocs( _api.MFnTransform, 'setScale' )
def setScale(self, scale, **kwargs):
if self._isRelativeArg(kwargs):
return self.scaleBy(scale, **kwargs)
return self._setScale(scale)
@_factories.addApiDocs( _api.MFnTransform, 'scaleBy' )
def scaleBy(self, scale, **kwargs):
curr = self.getScale()
self._scaleBy(scale)
new = self.getScale()
undoItem = _factories.ApiUndoItem(Transform.setScale, (self, new), (self, curr) )
_factories.apiUndo.append( undoItem )
@_factories.addApiDocs( _api.MFnTransform, 'setShear' )
def setShear(self, shear, **kwargs):
if self._isRelativeArg(kwargs):
return self.shearBy(shear, **kwargs)
return self._setShear(shear)
@_factories.addApiDocs( _api.MFnTransform, 'shearBy' )
def shearBy(self, shear, **kwargs):
curr = self.getShear()
self._shearBy(shear)
new = self.getShear()
undoItem = _factories.ApiUndoItem(Transform.setShear, (self, new), (self, curr) )
_factories.apiUndo.append( undoItem )
# @_factories.queryflag('xform','rotatePivot')
# def getRotatePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setRotatePivot' )
def setRotatePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setRotatePivot(point, space=space, balance=balance)
@_factories.addApiDocs( _api.MFnTransform, 'rotatePivot' )
def getRotatePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getRotatePivot(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'setRotatePivotTranslation' )
def setRotatePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setRotatePivotTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'rotatePivotTranslation' )
def getRotatePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getRotatePivotTranslation(space=space)
# @_factories.queryflag('xform','rotation')
# def getRotationOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setRotation' )
def setRotation(self, rotation, space='object', **kwargs):
'''
Modifications:
- rotation may be given as an EulerRotation, Quaternion, or iterable of 3
or 4 components (to specify an euler/quaternion, respectively)
'''
# quaternions are the only method that support a space parameter
if self._isRelativeArg(kwargs):
return self.rotateBy(rotation, space, **kwargs)
spaceIndex = datatypes.Spaces.getIndex(self._getSpaceArg(space, kwargs))
if not isinstance(rotation, (_api.MQuaternion, _api.MEulerRotation)):
rotation = list(rotation)
if len(rotation) == 3:
# using datatypes.Angle(x) means current angle-unit should be
# respected
rotation = [ datatypes.Angle( x ).asRadians() for x in rotation ]
rotation = _api.MEulerRotation( *rotation )
elif len(rotation) == 4:
rotation = _api.MQuaternion(*rotation)
else:
raise ValueError("rotation given to setRotation must have either 3 or 4 elements (for euler or quaternion, respectively)")
if isinstance(rotation, _api.MEulerRotation):
# MFnTransform.setRotation doesn't have a (non-deprecated) override
# which takes euler angles AND a transform space... this sort of
# makes sense, since the "unique" information that euler angles can
            # potentially carry - ie, rotation > 360 degrees - only really makes
# sense within the "transform" space. So, only use EulerRotation if
# we're using transform space...
if datatypes.equivalentSpace(spaceIndex, _api.MSpace.kTransform,
rotationOnly=True):
self.__apimfn__().setRotation(rotation)
return
else:
rotation = rotation.asQuaternion()
self.__apimfn__().setRotation(rotation, spaceIndex )
# @_factories.addApiDocs( _api.MFnTransform, 'getRotation' )
# def getRotation(self, space='object', **kwargs):
# # quaternions are the only method that support a space parameter
# space = self._getSpaceArg(space, kwargs )
# quat = _api.MQuaternion()
# _api.MFnTransform(self.__apimfn__()).getRotation(quat, datatypes.Spaces.getIndex(space) )
# return datatypes.EulerRotation( quat.asEulerRotation() )
@_factories.addApiDocs( _api.MFnTransform, 'getRotation', overloadIndex=1 )
def getRotation(self, space='object', quaternion=False, **kwargs):
'''
Modifications:
- added 'quaternion' keyword arg, to specify whether the result
              should be returned as a Quaternion object, as opposed to the default
EulerRotation object
- added 'space' keyword arg, which defaults to 'object'
'''
# quaternions are the only method that support a space parameter
space = self._getSpaceArg(space, kwargs )
if space.lower() in ('object', 'pretransform', 'transform') and not quaternion:
# In this case, we can just go straight to the EulerRotation,
# without having to go through Quaternion - this means we will
# get information like angles > 360 degrees
euler = _api.MEulerRotation()
self.__apimfn__().getRotation(euler)
rot = datatypes.EulerRotation(euler)
else:
rot = self._getRotation(space=space)
if not quaternion:
rot = rot.asEulerRotation()
if isinstance(rot, datatypes.EulerRotation):
rot.setDisplayUnit( datatypes.Angle.getUIUnit() )
return rot
@_factories.addApiDocs( _api.MFnTransform, 'rotateBy' )
def rotateBy(self, rotation, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
curr = self.getRotation(space)
self._rotateBy(rotation, space)
new = self.getRotation(space)
undoItem = _factories.ApiUndoItem(Transform.setRotation, (self, new, space), (self, curr, space) )
_factories.apiUndo.append( undoItem )
# @_factories.queryflag('xform','scalePivot')
# def getScalePivotOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.addApiDocs( _api.MFnTransform, 'setScalePivot' )
def setScalePivot(self, point, space='object', balance=True, **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setScalePivot(point, space=space, balance=balance)
@_factories.addApiDocs( _api.MFnTransform, 'scalePivot' )
def getScalePivot(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getScalePivot(space=space)
@_factories.addApiDocs( _api.MFnTransform, 'setScalePivotTranslation' )
def setScalePivotTranslation(self, vector, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._setScalePivotTranslation(vector, space=space)
@_factories.addApiDocs( _api.MFnTransform, 'scalePivotTranslation' )
def getScalePivotTranslation(self, space='object', **kwargs):
space = self._getSpaceArg(space, kwargs )
return self._getScalePivotTranslation(space=space)
@_factories.queryflag('xform','pivots')
def getPivots( self, **kwargs ):
res = cmds.xform( self, **kwargs )
return ( datatypes.Vector( res[:3] ), datatypes.Vector( res[3:] ) )
@_factories.queryflag('xform','rotateAxis')
def getRotateAxis( self, **kwargs ):
return datatypes.Vector( cmds.xform( self, **kwargs ) )
# @_factories.queryflag('xform','shear')
# def getShearOld( self, **kwargs ):
# return datatypes.Vector( cmds.xform( self, **kwargs ) )
@_factories.queryflag('xform','matrix')
def getMatrix( self, **kwargs ):
return datatypes.Matrix( cmds.xform( self, **kwargs ) )
#TODO: create API equivalent of `xform -boundingBoxInvisible` so we can replace this with _api.
def getBoundingBox(self, invisible=False, space='object'):
"""xform -boundingBox and xform -boundingBoxInvisible
:rtype: `BoundingBox`
"""
kwargs = {'query' : True }
if invisible:
kwargs['boundingBoxInvisible'] = True
else:
kwargs['boundingBox'] = True
if space=='object':
kwargs['objectSpace'] = True
elif space=='world':
kwargs['worldSpace'] = True
else:
raise ValueError('unknown space %r' % space)
res = cmds.xform( self, **kwargs )
#return ( datatypes.Vector(res[:3]), datatypes.Vector(res[3:]) )
return datatypes.BoundingBox( res[:3], res[3:] )
def getBoundingBoxMin(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[0]
#return self.getBoundingBox(invisible).min()
def getBoundingBoxMax(self, invisible=False, space='object'):
"""
:rtype: `Vector`
"""
return self.getBoundingBox(invisible, space)[1]
#return self.getBoundingBox(invisible).max()
# def centerPivots(self, **kwargs):
# """xform -centerPivots"""
# kwargs['centerPivots'] = True
# cmds.xform( self, **kwargs )
#
# def zeroTransformPivots(self, **kwargs):
# """xform -zeroTransformPivots"""
# kwargs['zeroTransformPivots'] = True
# cmds.xform( self, **kwargs )
class Joint(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
connect = _factories.functionFactory( cmds.connectJoint, rename='connect')
disconnect = _factories.functionFactory( cmds.disconnectJoint, rename='disconnect')
insert = _factories.functionFactory( cmds.insertJoint, rename='insert')
if versions.isUnlimited():
class FluidEmitter(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
fluidVoxelInfo = _factories.functionFactory( cmds.fluidVoxelInfo, rename='fluidVoxelInfo')
loadFluid = _factories.functionFactory( cmds.loadFluid, rename='loadFluid')
resampleFluid = _factories.functionFactory( cmds.resampleFluid, rename='resampleFluid')
saveFluid = _factories.functionFactory( cmds.saveFluid, rename='saveFluid')
setFluidAttr = _factories.functionFactory( cmds.setFluidAttr, rename='setFluidAttr')
getFluidAttr = _factories.functionFactory( cmds.getFluidAttr, rename='getFluidAttr')
class RenderLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map( general.PyNode, _util.listForNone( cmds.editRenderLayerMembers( self, q=1, fullNames=True) ) )
else:
return _util.listForNone( cmds.editRenderLayerMembers( self, q=1, fullNames=False) )
def addMembers(self, members, noRecurse=True):
cmds.editRenderLayerMembers( self, members, noRecurse=noRecurse )
def removeMembers(self, members ):
cmds.editRenderLayerMembers( self, members, remove=True )
def listAdjustments(self):
return map( general.PyNode, _util.listForNone( cmds.editRenderLayerAdjustment( self, layer=1, q=1) ) )
def addAdjustments(self, members):
return cmds.editRenderLayerAdjustment( members, layer=self )
def removeAdjustments(self, members ):
return cmds.editRenderLayerAdjustment( members, layer=self, remove=True )
def setCurrent(self):
cmds.editRenderLayerGlobals( currentRenderLayer=self)
class DisplayLayer(DependNode):
def listMembers(self, fullNames=True):
if fullNames:
return map( general.PyNode, _util.listForNone( cmds.editDisplayLayerMembers( self, q=1, fullNames=True) ) )
else:
return _util.listForNone( cmds.editDisplayLayerMembers( self, q=1, fullNames=False) )
def addMembers(self, members, noRecurse=True):
cmds.editDisplayLayerMembers( self, members, noRecurse=noRecurse )
def removeMembers(self, members ):
cmds.editDisplayLayerMembers( self, members, remove=True )
def setCurrent(self):
cmds.editDisplayLayerMembers( currentDisplayLayer=self)
class Constraint(Transform):
def setWeight( self, weight, *targetObjects ):
inFunc = getattr( cmds, self.type() )
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc( *args, **{'edit':True, 'weight':weight} )
def getWeight( self, *targetObjects ):
inFunc = getattr( cmds, self.type() )
if not targetObjects:
targetObjects = self.getTargetList()
constraintObj = self.constraintParentInverseMatrix.inputs()[0]
args = list(targetObjects) + [constraintObj]
return inFunc( *args, **{'query':True, 'weight':True} )
class GeometryShape(Shape):
def __getattr__(self, attr):
#print "Mesh.__getattr__", attr
try:
return self.comp(attr)
except general.MayaComponentError:
#print "getting super", attr
return super(GeometryShape,self).__getattr__(attr)
class DeformableShape(GeometryShape):
@classmethod
def _numCVsFunc_generator(cls, formFunc, spansPlusDegreeFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable CVs,
as opposed to just numSpans + degree.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc/etc will not be defined
until then, as they are added by the metaclass.
"""
def _numCvs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansPlusDegreeFunc(self)
if name:
_numCvs_generatedFunc.__name__ = name
if doc:
_numCvs_generatedFunc.__doc__ = doc
return _numCvs_generatedFunc
@classmethod
def _numEPsFunc_generator(cls, formFunc, spansFunc,
name=None, doc=None):
"""
Intended to be used by NurbsCurve / NurbsSurface to generate
functions which give the 'true' number of editable EPs,
as opposed to just numSpans.
(The two values will differ if we have a periodic curve).
Note that this will usually need to be called outside/after the
class definition, as formFunc/spansFunc will not be defined
until then, as they are added by the metaclass.
"""
def _numEPs_generatedFunc(self, editableOnly=True):
if editableOnly and formFunc(self) == self.Form.periodic:
return spansFunc(self)
else:
return spansFunc(self) + 1
if name:
_numEPs_generatedFunc.__name__ = name
if doc:
_numEPs_generatedFunc.__doc__ = doc
return _numEPs_generatedFunc
class ControlPoint(DeformableShape): pass
class CurveShape(DeformableShape): pass
class NurbsCurve(CurveShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u' : general.NurbsCurveParameter,
'cv' : general.NurbsCurveCV,
'controlVerts': general.NurbsCurveCV,
'ep' : general.NurbsCurveEP,
'editPoints' : general.NurbsCurveEP,
'knot' : general.NurbsCurveKnot,
'knots' : general.NurbsCurveKnot}
# apiToMelBridge maps MFnNurbsCurve.numCVs => NurbsCurve._numCVsApi
NurbsCurve.numCVs = \
NurbsCurve._numCVsFunc_generator(NurbsCurve.form,
NurbsCurve._numCVsApi,
NurbsCurve.numSpans,
name='numCVs',
doc =
"""
Returns the number of CVs.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for cv's - ie, if
myCurve.numCVs(editableOnly=True) == 4
then allowable cv indices go from
                myCurve.cv[0] to myCurve.cv[3]
                If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve -
degree + numSpans.
These will only differ if the form is 'periodic', in which
case the editable number will be numSpans (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degree + numSpans.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve1', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'periodicCurveShape1.cv[0:7]')
>>> myCurve.numCVs()
8
>>> myCurve.numCVs(editableOnly=False)
11
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve1', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.cv
NurbsCurveCV(u'openCurveShape1.cv[0:10]')
>>> myCurve.numCVs()
11
>>> myCurve.numCVs(editableOnly=False)
11
:rtype: `int`
""")
NurbsCurve.numEPs = \
NurbsCurve._numEPsFunc_generator(NurbsCurve.form,
NurbsCurve.numSpans,
name='numEPs',
doc =
"""
Returns the number of EPs.
:Examples:
>>> from pymel.core import *
>>> # a periodic curve
>>> myCurve = curve(name='periodicCurve2', d=3, periodic=True, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'periodicCurveShape2.ep[0:7]')
>>> myCurve.numEPs()
8
>>>
>>> # an open curve
>>> myCurve = curve(name='openCurve2', d=3, periodic=False, k=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1)] )
>>> myCurve.ep
NurbsCurveEP(u'openCurveShape2.ep[0:8]')
>>> myCurve.numEPs()
9
:rtype: `int`
""")
class SurfaceShape(ControlPoint): pass
class NurbsSurface(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'u' : (general.NurbsSurfaceRange, 'u'),
'uIsoparm' : (general.NurbsSurfaceRange, 'u'),
'v' : (general.NurbsSurfaceRange, 'v'),
'vIsoparm' : (general.NurbsSurfaceRange, 'v'),
'uv' : (general.NurbsSurfaceRange, 'uv'),
'cv' : general.NurbsSurfaceCV,
'controlVerts': general.NurbsSurfaceCV,
'ep' : general.NurbsSurfaceEP,
'editPoints' : general.NurbsSurfaceEP,
'knot' : general.NurbsSurfaceKnot,
'knots' : general.NurbsSurfaceKnot,
'sf' : general.NurbsSurfaceFace,
'faces' : general.NurbsSurfaceFace}
# apiToMelBridge maps MFnNurbsSurface.numCVsInU => NurbsSurface._numCVsInUApi
NurbsSurface.numCVsInU = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInU,
NurbsSurface._numCVsInUApi,
NurbsSurface.numSpansInU,
name='numCVsInU',
doc =
"""
Returns the number of CVs in the U direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for u - ie, if
mySurf.numCVsInU(editableOnly=True) == 4
then allowable u indices go from
mySurf.cv[0][*] to mySurf.cv[3][*]
                If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in u -
degreeU + numSpansInU.
These will only differ if the form in u is 'periodic', in which
case the editable number will be numSpansInU (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeU + numSpansInU.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf1', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numCVsInU()
8
>>> mySurf.numCVsInU(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf1', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=((4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)) )
>>> sorted(mySurf.cv[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((10, 0), label=None)]
>>> mySurf.numCVsInU()
11
>>> mySurf.numCVsInU(editableOnly=False)
11
:rtype: `int`
""")
# apiToMelBridge maps MFnNurbsSurface.numCVsInV => NurbsSurface._numCVsInVApi
NurbsSurface.numCVsInV = \
NurbsSurface._numCVsFunc_generator(NurbsSurface.formInV,
NurbsSurface._numCVsInVApi,
NurbsSurface.numSpansInV,
name='numCVsInV',
doc =
"""
Returns the number of CVs in the V direction.
:Parameters:
editableOnly : `bool`
If editableOnly evaluates to True (default), then this will return
the number of cvs that can be actually edited (and also the highest
index that may be used for v - ie, if
mySurf.numCVsInV(editableOnly=True) == 4
then allowable v indices go from
mySurf.cv[*][0] to mySurf.cv[*][3]
                If editableOnly is False, then this will return the underlying
number of cvs used to define the mathematical curve in v -
degreeV + numSpansInV.
These will only differ if the form in v is 'periodic', in which
case the editable number will be numSpansInV (as the last 'degree'
cv's are 'locked' to be the same as the first 'degree' cvs).
In all other cases, the number of cvs will be degreeV + numSpansInV.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf2', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label='cv')]
>>> mySurf.numCVsInV()
8
>>> mySurf.numCVsInV(editableOnly=False)
11
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf2', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.cv[0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 10), label='cv')]
>>> mySurf.numCVsInV()
11
>>> mySurf.numCVsInV(editableOnly=False)
11
:rtype: `int`
""")
NurbsSurface.numEPsInU = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInU,
NurbsSurface.numSpansInU,
name='numEPsInU',
doc =
"""
Returns the number of EPs in the U direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf3', du=3, dv=1, fu='periodic', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((7, 0), label=None)]
>>> mySurf.numEPsInU()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf3', du=3, dv=1, fu='open', fv='open', ku=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), kv=(0, 1), pw=[(4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1), (0, 5.5, 0, 1), (0, 5.5, -2.5, 1), (-4, 4, 0, 1), (-4, 4, -2.5, 1), (-5.5, 0, 0, 1), (-5.5, 0, -2.5, 1), (-4, -4, 0, 1), (-4, -4, -2.5, 1), (0, -5.5, 0, 1), (0, -5.5, -2.5, 1), (4, -4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, 0, 1), (5.5, 0, -2.5, 1), (4, 4, 0, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[:][0].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((8, 0), label=None)]
>>> mySurf.numEPsInU()
9
:rtype: `int`
""")
NurbsSurface.numEPsInV = \
NurbsSurface._numEPsFunc_generator(NurbsSurface.formInV,
NurbsSurface.numSpansInV,
name='numEPsInV',
doc =
"""
Returns the number of EPs in the V direction.
:Examples:
>>> from pymel.core import *
>>> # a periodic surface
>>> mySurf = surface(name='periodicSurf4', du=1, dv=3, fu='open', fv='periodic', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 7), label=None)]
>>> mySurf.numEPsInV()
8
>>>
>>> # an open surface
>>> mySurf = surface(name='openSurf4', du=1, dv=3, fu='open', fv='open', ku=(0, 1), kv=(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12), pw=[(4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (0, 5.5, 0, 1), (-4, 4, 0, 1), (-5.5, 0, 0, 1), (-4, -4, 0, 1), (0, -5.5, 0, 1), (4, -4, 0, 1), (5.5, 0, 0, 1), (4, 4, 0, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1), (0, 5.5, -2.5, 1), (-4, 4, -2.5, 1), (-5.5, 0, -2.5, 1), (-4, -4, -2.5, 1), (0, -5.5, -2.5, 1), (4, -4, -2.5, 1), (5.5, 0, -2.5, 1), (4, 4, -2.5, 1)] )
>>> sorted(mySurf.ep[0][:].indices()) # doctest: +ELLIPSIS
[ComponentIndex((0, 0), ... ComponentIndex((0, 8), label=None)]
>>> mySurf.numEPsInV()
9
:rtype: `int`
""")
class Mesh(SurfaceShape):
"""
The Mesh class provides wrapped access to many API methods for querying and modifying meshes. Be aware that
modifying meshes using API commands outside of the context of a plugin is still somewhat uncharted territory,
    so proceed at your own risk.
    The component types can be accessed from the `Mesh` type (or its transform) using the names you are
familiar with from MEL:
>>> from pymel.core import *
>>> p = polySphere( name='theMoon', sa=7, sh=7 )[0]
>>> p.vtx
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.e
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.f
MeshFace(u'theMoonShape.f[0:48]')
They are also accessible from their more descriptive alternatives:
>>> p.verts
MeshVertex(u'theMoonShape.vtx[0:43]')
>>> p.edges
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.faces
MeshFace(u'theMoonShape.f[0:48]')
As you'd expect, these components are all indexible:
>>> p.vtx[0]
MeshVertex(u'theMoonShape.vtx[0]')
The classes themselves contain methods for getting information about the component.
>>> p.vtx[0].connectedEdges()
MeshEdge(u'theMoonShape.e[0,6,42,77]')
This class provides support for python's extended slice notation. Typical maya ranges express a start and stop value separated
by a colon. Extended slices add a step parameter and can also represent multiple ranges separated by commas.
    Thus, a single component object can represent any collection of indices,
    built from any combination of start, stop, and step values.
>>> # do every other edge between 0 and 10
>>> for edge in p.e[0:10:2]:
... print edge
...
theMoonShape.e[0]
theMoonShape.e[2]
theMoonShape.e[4]
theMoonShape.e[6]
theMoonShape.e[8]
theMoonShape.e[10]
Negative indices can be used for getting indices relative to the end:
>>> p.edges # the full range
MeshEdge(u'theMoonShape.e[0:90]')
>>> p.edges[5:-10] # index 5 through to 10 from the last
MeshEdge(u'theMoonShape.e[5:80]')
Just like with python ranges, you can leave an index out, and the logical result will follow:
>>> p.edges[:-10] # from the beginning
MeshEdge(u'theMoonShape.e[0:80]')
>>> p.edges[20:]
MeshEdge(u'theMoonShape.e[20:90]')
Or maybe you want the position of every tenth vert:
>>> for x in p.vtx[::10]:
... print x, x.getPosition()
...
theMoonShape.vtx[0] [0.270522117615, -0.900968849659, -0.339223951101]
theMoonShape.vtx[10] [-0.704405844212, -0.623489797115, 0.339223951101]
theMoonShape.vtx[20] [0.974927902222, -0.222520858049, 0.0]
theMoonShape.vtx[30] [-0.704405784607, 0.623489797115, -0.339224010706]
theMoonShape.vtx[40] [0.270522087812, 0.900968849659, 0.339223980904]
To be compatible with Maya's range notation, these slices are inclusive of the stop index.
>>> # face at index 8 will be included in the sequence
>>> for f in p.f[4:8]: print f
...
theMoonShape.f[4]
theMoonShape.f[5]
theMoonShape.f[6]
theMoonShape.f[7]
theMoonShape.f[8]
>>> from pymel.core import *
>>> obj = polyTorus()[0]
>>> colors = []
>>> for i, vtx in enumerate(obj.vtx): # doctest: +SKIP
... edgs=vtx.toEdges() # doctest: +SKIP
... totalLen=0 # doctest: +SKIP
... edgCnt=0 # doctest: +SKIP
... for edg in edgs: # doctest: +SKIP
... edgCnt += 1 # doctest: +SKIP
... l = edg.getLength() # doctest: +SKIP
... totalLen += l # doctest: +SKIP
... avgLen=totalLen / edgCnt # doctest: +SKIP
... #print avgLen # doctest: +SKIP
... currColor = vtx.getColor(0) # doctest: +SKIP
... color = datatypes.Color.black # doctest: +SKIP
... # only set blue if it has not been set before
... if currColor.b<=0.0: # doctest: +SKIP
... color.b = avgLen # doctest: +SKIP
... color.r = avgLen # doctest: +SKIP
... colors.append(color) # doctest: +SKIP
"""
__metaclass__ = _factories.MetaMayaNodeWrapper
# def __init__(self, *args, **kwargs ):
# SurfaceShape.__init__(self, self._apiobject )
# self.vtx = MeshEdge(self.__apimobject__() )
_componentAttributes = {'vtx' : general.MeshVertex,
'verts' : general.MeshVertex,
'e' : general.MeshEdge,
'edges' : general.MeshEdge,
'f' : general.MeshFace,
'faces' : general.MeshFace,
'map' : general.MeshUV,
'uvs' : general.MeshUV,
'vtxFace' : general.MeshVertexFace,
'faceVerts' : general.MeshVertexFace}
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
numVertices = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'vertex', 'numVertices' )
numEdges = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'edge', 'numEdges' )
numFaces = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'face', 'numFaces' )
numTriangles = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'triangles', 'numTriangles' )
numSelectedTriangles = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'triangleComponent', 'numSelectedTriangles' )
numSelectedFaces = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'faceComponent', 'numSelectedFaces' )
numSelectedEdges = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'edgeComponent', 'numSelectedEdges' )
numSelectedVertices = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'vertexComponent', 'numSelectedVertices' )
area = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'area' )
worldArea = _factories.makeCreateFlagMethod( cmds.polyEvaluate, 'worldArea' )
if versions.current() >= versions.v2009:
@_factories.addApiDocs( _api.MFnMesh, 'currentUVSetName' )
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName( self.instanceNumber() )
@_factories.addApiDocs( _api.MFnMesh, 'currentColorSetName' )
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName( self.instanceNumber() )
else:
@_factories.addApiDocs( _api.MFnMesh, 'currentUVSetName' )
def getCurrentUVSetName(self):
return self.__apimfn__().currentUVSetName()
@_factories.addApiDocs( _api.MFnMesh, 'currentColorSetName' )
def getCurrentColorSetName(self):
return self.__apimfn__().currentColorSetName()
@_factories.addApiDocs( _api.MFnMesh, 'numColors' )
def numColors(self, colorSet=None):
mfn = self.__apimfn__()
# If we have an empty mesh, we will get an MFnDagNode...
if not isinstance(mfn, _api.MFnMesh):
return 0
args = []
if colorSet:
args.append(colorSet)
return mfn.numColors(*args)
# Unfortunately, objects that don't yet have any mesh data - ie, if you do
# createNode('mesh') - can't be fed into MFnMesh (even though it is a mesh
# node). This means that all the methods wrapped from MFnMesh won't be
# usable in this case. While it might make sense for some methods - ie,
# editing methods like collapseEdges - to fail in this situation, some
# basic methods like numVertices should still be usable. Therefore,
# we override some of these with the mel versions (which still work...)
def _makeApiMethodWrapForEmptyMesh(apiMethodName, baseMethodName=None,
resultName=None, defaultVal=0):
if baseMethodName is None:
baseMethodName = '_' + apiMethodName
if resultName is None:
resultName = apiMethodName
baseMethod = getattr(Mesh, baseMethodName)
@_factories.addApiDocs( _api.MFnMesh, apiMethodName )
def methodWrapForEmptyMesh(self, *args, **kwargs):
# If we have an empty mesh, we will get an MFnDagNode...
mfn = self.__apimfn__()
if not isinstance(mfn, _api.MFnMesh):
return defaultVal
return baseMethod(self, *args, **kwargs)
methodWrapForEmptyMesh.__name__ = resultName
return methodWrapForEmptyMesh
for _apiMethodName in '''numColorSets
numFaceVertices
numNormals
numUVSets
numUVs'''.split():
_wrappedFunc = _makeApiMethodWrapForEmptyMesh(_apiMethodName)
setattr(Mesh, _wrappedFunc.__name__, _wrappedFunc)
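# Illustrative sketch (not executed): with the wrappers above, a mesh node that has
# no geometry yet (e.g. created via createNode('mesh')) is expected to report zero
# counts instead of failing when MFnMesh cannot be attached:
#   empty = createNode('mesh')
#   empty.numVertices()    # 0, via the mel polyEvaluate fallback defined above
#   empty.numUVs()         # 0, the defaultVal used by _makeApiMethodWrapForEmptyMesh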
class Subdiv(SurfaceShape):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'smp' : general.SubdVertex,
'verts' : general.SubdVertex,
'sme' : general.SubdEdge,
'edges' : general.SubdEdge,
'smf' : general.SubdFace,
'faces' : general.SubdFace,
'smm' : general.SubdUV,
'uvs' : general.SubdUV}
def getTweakedVerts(self, **kwargs):
return cmds.querySubdiv( action=1, **kwargs )
def getSharpenedVerts(self, **kwargs):
return cmds.querySubdiv( action=2, **kwargs )
def getSharpenedEdges(self, **kwargs):
return cmds.querySubdiv( action=3, **kwargs )
def getEdges(self, **kwargs):
return cmds.querySubdiv( action=4, **kwargs )
def cleanTopology(self):
cmds.subdCleanTopology(self)
class Lattice(ControlPoint):
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt' : general.LatticePoint,
'points': general.LatticePoint}
class Particle(DeformableShape):
__apicls__ = _api.MFnParticleSystem
__metaclass__ = _factories.MetaMayaNodeWrapper
_componentAttributes = {'pt' : general.ParticleComponent,
'points': general.ParticleComponent}
# for backwards compatibility
Point = general.ParticleComponent
# for backwards compatibility, keep these two, even though the api wrap
# will also provide 'count'
def pointCount(self):
return cmds.particle( self, q=1,count=1)
num = pointCount
class SelectionSet( _api.MSelectionList):
apicls = _api.MSelectionList
__metaclass__ = _factories.MetaMayaTypeWrapper
def __init__(self, objs):
""" can be initialized from a list of objects, another SelectionSet, an MSelectionList, or an ObjectSet"""
if isinstance(objs, _api.MSelectionList ):
_api.MSelectionList.__init__(self, objs)
elif isinstance(objs, ObjectSet ):
_api.MSelectionList.__init__(self, objs.asSelectionSet() )
else:
_api.MSelectionList.__init__(self)
for obj in objs:
if isinstance(obj, (DependNode, DagNode) ):
self.apicls.add( self, obj.__apiobject__() )
elif isinstance(obj, general.Attribute):
self.apicls.add( self, obj.__apiobject__(), True )
# elif isinstance(obj, Component):
# sel.add( obj.__apiobject__(), True )
elif isinstance( obj, basestring ):
self.apicls.add( self, obj )
else:
raise TypeError
def __melobject__(self):
# If the list contains components, THEIR __melobject__ is a list -
# so need to iterate through, and flatten if needed
melList = []
for selItem in self:
selItem = selItem.__melobject__()
if _util.isIterable(selItem):
melList.extend(selItem)
else:
melList.append(selItem)
return melList
def __len__(self):
""":rtype: `int` """
return self.apicls.length(self)
def __contains__(self, item):
""":rtype: `bool` """
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.hasItem(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.hasItem(self, general.PyNode(item).__apiobject__())
def __repr__(self):
""":rtype: `str` """
names = []
self.apicls.getSelectionStrings( self, names )
return 'nt.%s(%s)' % ( self.__class__.__name__, names )
def __getitem__(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
plug = _api.MPlug()
obj = _api.MObject()
dag = _api.MDagPath()
comp = _api.MObject()
# Go from most specific to least - plug, dagPath, dependNode
try:
self.apicls.getPlug( self, index, plug )
assert not plug.isNull()
except (RuntimeError, AssertionError):
try:
self.apicls.getDagPath( self, index, dag, comp )
except RuntimeError:
try:
self.apicls.getDependNode( self, index, obj )
return general.PyNode( obj )
except:
pass
else:
if comp.isNull():
return general.PyNode( dag )
else:
return general.PyNode( dag, comp )
else:
return general.PyNode( plug )
def __setitem__(self, index, item):
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.replace(self, index, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
            return self.apicls.replace(self, index, general.PyNode(item).__apiobject__())
def __and__(self, s):
"operator for `SelectionSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `SelectionSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `SelectionSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `SelectionSet.union`"
return self.union(s)
def __lt__(self, s):
"operator for `SelectionSet.isSubSet`"
return self.isSubSet(s)
def __gt__(self, s):
"operator for `SelectionSet.isSuperSet`"
return self.isSuperSet(s)
def __sub__(self, s):
"operator for `SelectionSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `SelectionSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `SelectionSet.symmetricDifference`"
return self.symmetricDifference(s)
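    # Illustrative sketch (not executed): the operator overloads above map onto the
    # named set methods, e.g. for two SelectionSets a and b:
    #   a & b    # a.getIntersection(b)
    #   a | b    # a.getUnion(b)
    #   a - b    # a.getDifference(b)
    #   a ^ b    # a.getSymmetricDifference(b)
    #   a < b    # a.isSubSet(b),  a > b  ->  a.isSuperSet(b)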
def add(self, item):
if isinstance(item, (DependNode, DagNode, general.Attribute) ):
return self.apicls.add(self, item.__apiobject__())
elif isinstance(item, general.Component):
raise NotImplementedError, 'Components not yet supported'
else:
return self.apicls.add(self, general.PyNode(item).__apiobject__())
def pop(self, index):
""":rtype: `PyNode` """
if index >= len(self):
raise IndexError, "index out of range"
return self.apicls.remove(self, index )
def isSubSet(self, other):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issubset(other)
def isSuperSet(self, other, flatten=True ):
""":rtype: `bool`"""
if isinstance(other, ObjectSet):
other = other.asSelectionSet()
return set(self).issuperset(other)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
# diff = self-other
# intersect = self-diff
diff = self.getDifference(other)
return self.getDifference(diff)
def intersection(self, other):
diff = self.getDifference(other)
self.difference(diff)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet( self )
newSet.difference(other)
return newSet
def difference(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
self.apicls.merge( self, other, _api.MSelectionList.kRemoveFromList )
def getUnion(self, other):
""":rtype: `SelectionSet`"""
newSet = SelectionSet( self )
newSet.union(other)
return newSet
def union(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
self.apicls.merge( self, other, _api.MSelectionList.kMergeNormal )
def getSymmetricDifference(self, other):
"""
Also known as XOR
:rtype: `SelectionSet`
"""
# create a new SelectionSet so that we don't modify our current one
newSet = SelectionSet( self )
newSet.symmetricDifference(other)
return newSet
def symmetricDifference(self, other):
if not isinstance( other, _api.MSelectionList ):
other = SelectionSet( other )
# FIXME: does kXOR exist? completion says only kXORWithList exists
self.apicls.merge( self, other, _api.MSelectionList.kXOR )
def asObjectSet(self):
return general.sets( self )
# def intersect(self, other):
# self.apicls.merge( other, _api.MSelectionList.kXORWithList )
class ObjectSet(Entity):
"""
The ObjectSet class and `SelectionSet` class work together. Both classes have a very similar interface,
the primary difference is that the ObjectSet class represents connections to an objectSet node, while the
    `SelectionSet` class is a generic set, akin to python's built-in `set`.
create some sets:
>>> from pymel.core import *
>>> f=newFile(f=1) #start clean
>>>
>>> s = sets() # create an empty set
>>> s.union( ls( type='camera') ) # add some cameras to it
>>> s.members() # doctest: +SKIP
[nt.Camera(u'sideShape'), nt.Camera(u'frontShape'), nt.Camera(u'topShape'), nt.Camera(u'perspShape')]
>>> sel = s.asSelectionSet() # or as a SelectionSet
>>> sel # doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape', u'perspShape'])
>>> sorted(sel) # as a sorted list
[nt.Camera(u'frontShape'), nt.Camera(u'perspShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
Operations between sets result in `SelectionSet` objects:
>>> t = sets() # create another set
>>> t.add( 'perspShape' ) # add the persp camera shape to it
>>> s.getIntersection(t)
nt.SelectionSet([u'perspShape'])
>>> diff = s.getDifference(t)
>>> diff #doctest: +SKIP
nt.SelectionSet([u'sideShape', u'frontShape', u'topShape'])
>>> sorted(diff)
[nt.Camera(u'frontShape'), nt.Camera(u'sideShape'), nt.Camera(u'topShape')]
>>> s.isSuperSet(t)
True
"""
# >>> u = sets( s&t ) # intersection
# >>> print u.elements(), s.elements()
# >>> if u < s: print "%s is a sub-set of %s" % (u, s)
#
# place a set inside another, take1
#
# >>> # like python's built-in set, the add command expects a single element
# >>> s.add( t )
#
# place a set inside another, take2
#
# >>> # like python's built-in set, the update command expects a set or a list
# >>> t.update([u])
#
# >>> # put the sets back where they were
# >>> s.remove(t)
# >>> t.remove(u)
#
# now put the **contents** of a set into another set
#
# >>> t.update(u)
#
# mixed operation between pymel.core.ObjectSet and built-in set
#
# >>> v = set(['polyCube3', 'pSphere3'])
# >>> print s.intersection(v)
# >>> print v.intersection(s) # not supported yet
# >>> u.clear()
#
# >>> delete( s )
# >>> delete( t )
# >>> delete( u )
#
#
# these will return the results of the operation as python sets containing lists of pymel node classes::
#
# s&t # s.intersection(t)
# s|t # s.union(t)
# s^t # s.symmetric_difference(t)
# s-t # s.difference(t)
#
# the following will alter the contents of the maya set::
#
# s&=t # s.intersection_update(t)
# s|=t # s.update(t)
# s^=t # s.symmetric_difference_update(t)
# s-=t # s.difference_update(t)
#
# def _elements(self):
# """ used internally to get a list of elements without casting to node classes"""
# return sets( self, q=True)
# #-----------------------
# # Maya Methods
# #-----------------------
__metaclass__ = _factories.MetaMayaNodeWrapper
#-----------------------
# Python ObjectSet Methods
#-----------------------
@classmethod
def _getApiObjs(cls, item, tryCast=True):
"""
Returns a tuple of api objects suitable (after unpacking) for
feeding to most of the MFnSet methods (ie, remove, isMember, etc)
"""
if isinstance(item, DagNode):
return ( item.__apimdagpath__(), _api.MObject() )
elif isinstance(item, (DependNode, general.Attribute) ):
return ( item.__apiobject__(), )
elif isinstance(item, general.Component):
return ( item.__apimdagpath__(), item.__apimobject__() )
elif tryCast:
return cls._getApiObjs(general.PyNode(item), tryCast=False)
else:
raise TypeError(item)
def __contains__(self, item):
""":rtype: `bool` """
return self.__apimfn__().isMember(*self._getApiObjs(item))
def __getitem__(self, index):
return self.asSelectionSet()[index]
def __len__(self):
""":rtype: `int`"""
return cmds.sets(self, q=1, size=1)
#def __eq__(self, s):
# return s == self._elements()
#def __ne__(self, s):
# return s != self._elements()
def __and__(self, s):
"operator for `ObjectSet.getIntersection`"
return self.getIntersection(s)
def __iand__(self, s):
"operator for `ObjectSet.intersection`"
return self.intersection(s)
def __or__(self, s):
"operator for `ObjectSet.getUnion`"
return self.getUnion(s)
def __ior__(self, s):
"operator for `ObjectSet.union`"
return self.union(s)
# def __lt__(self, s):
# "operator for `ObjectSet.isSubSet`"
# return self.isSubSet(s)
#
# def __gt__(self, s):
# "operator for `ObjectSet.isSuperSet`"
# return self.isSuperSet(s)
def __sub__(self, s):
"operator for `ObjectSet.getDifference`"
return self.getDifference(s)
def __isub__(self, s):
"operator for `ObjectSet.difference`"
return self.difference(s)
def __xor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.getSymmetricDifference(s)
def __ixor__(self, s):
"operator for `ObjectSet.symmetricDifference`"
return self.symmetricDifference(s)
#
# def subtract(self, set2):
# return sets( self, subtract=set2 )
#
# def add(self, element):
# return sets( self, add=[element] )
#
# def clear(self):
# return sets( self, clear=True )
#
# def copy(self ):
# return sets( self, copy=True )
#
# def difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return list(set(self.elements()).difference(elements))
#
# '''
# if isinstance(s, ObjectSet) or isinstance(s, str):
# return sets( s, subtract=self )
#
# s = sets( s )
# res = sets( s, subtract=self )
# cmds.delete(s)
# return res'''
#
# def difference_update(self, elements ):
# return sets( self, remove=elements)
#
# def discard( self, element ):
# try:
# return self.remove(element)
# except TypeError:
# pass
#
# def intersection(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).intersection(elements)
#
# def intersection_update(self, elements):
# self.clear()
# sets( self, add=self.intersections(elements) )
#
#
# def remove( self, element ):
# return sets( self, remove=[element])
#
# def symmetric_difference(self, elements):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).symmetric_difference(elements)
#
# def union( self, elements ):
# if isinstance(elements,basestring):
# elements = cmds.sets( elements, q=True)
# return set(self.elements()).union(elements)
#
# def update( self, set2 ):
# sets( self, forceElement=set2 )
def members(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list( self.asSelectionSet(flatten) )
@_warnings.deprecated( 'Use ObjectSet.members instead', 'ObjectSet' )
def elements(self, flatten=False):
"""return members as a list
:rtype: `list`
"""
return list( self.asSelectionSet(flatten) )
def flattened(self):
"""return a flattened list of members. equivalent to `ObjectSet.members(flatten=True)`
:rtype: `list`
"""
return self.members(flatten=True)
def resetTo(self, newContents ):
"""clear and set the members to the passed list/set"""
self.clear()
self.addMembers( newContents )
def add(self, item):
return self.__apimfn__().addMember(*self._getApiObjs(item))
def remove(self, item):
try:
return self.__apimfn__().removeMember(*self._getApiObjs(item))
except RuntimeError:
# Provide a more informative error if object is not in set
if item not in self:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise ValueError("%s not in set %r" % (itemStr, self))
else:
raise
def isSubSet(self, other):
""":rtype: `bool`"""
return self.asSelectionSet().isSubSet(other)
def isSuperSet(self, other ):
""":rtype: `bool`"""
return self.asSelectionSet().isSuperSet(other)
def isEqual(self, other ):
"""
do not use __eq__ to test equality of set contents. __eq__ will only tell you if
the passed object is the same node, not if this set and the passed set
have the same contents.
:rtype: `bool`
"""
return self.asSelectionSet() == SelectionSet(other)
def getDifference(self, other):
""":rtype: `SelectionSet`"""
sel = self.asSelectionSet()
sel.difference(other)
return sel
def difference(self, other):
sel = self.getDifference(other)
self.resetTo(sel)
def getSymmetricDifference(self, other):
"""also known as XOR
:rtype: `SelectionSet`
"""
        sel = self.asSelectionSet()
        sel.symmetricDifference(other)
return sel
def symmetricDifference(self, other):
        sel = self.getSymmetricDifference(other)
self.resetTo(sel)
def getIntersection(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getIntersection(other)
#elif isinstance(other, SelectionSet) or hasattr(other, '__iter__'):
selSet = self.asSelectionSet()
selSet.intersection(other)
return selSet
#raise TypeError, 'Cannot perform intersection with non-iterable type %s' % type(other)
def intersection(self, other):
sel = self.getIntersection(other)
self.resetTo(sel)
def getUnion(self, other):
""":rtype: `SelectionSet`"""
if isinstance(other, ObjectSet):
return self._getUnion(other)
selSet = self.asSelectionSet()
selSet.union(other)
return selSet
def union(self, other):
self.addMembers(other)
def isRenderable(self):
'''Mimics cmds.sets(self, q=True, renderable=True).
Alternatively you can use isinstance(someset, pm.nt.ShadingEngine)
since shadingEngine is the only renderable set in maya now
'''
return bool(cmds.sets(self, q=True, r=True))
class ShadingEngine(ObjectSet):
@classmethod
def _getApiObjs(cls, item, tryCast=True):
# Since shading groups can't contain transforms, as a convenience,
# use getShape on any transforms
if isinstance(item, Transform):
shape = item.getShape()
if shape:
return cls._getApiObjs(shape)
else:
try:
itemStr = repr(item)
except Exception:
itemStr = 'item'
raise TypeError("%s has no shape, and %s objects cannot contain Transforms" % (itemStr, cls.__name__))
else:
return super(ShadingEngine, cls)._getApiObjs(item, tryCast=tryCast)
class AnimLayer(ObjectSet):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getAttribute(self):
'''Retrieve the attributes animated on this AnimLayer
'''
# Unfortunately, cmds.animLayer('MyAnimLayer', q=1, attribute=1)
        # returns non-unique attribute names, ie,
# MyNode.myAttr
# even if there are foo|MyNode and bar|MyNode in the scene, and there
# doesn't seem to be a flag to tell it to give unique / full paths.
        # Therefore, query it ourselves, by getting inputs to dagSetMembers.
# Testing has shown that animLayers only use dagSetMembers, and never
# dnSetMembers - if you add a non-dag node to an animLayer, it makes
# a connection to dagSetMembers; and even if you manually make a connection
# to dnSetMembers, those connections don't seem to show up in
# animLayer(q=1, attribute=1)
return self.attr('dagSetMembers').inputs(plugs=1)
getAttributes = getAttribute
class AnimCurve(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
def addKeys(self,time,values,tangentInType='linear',tangentOutType='linear',unit=None):
if not unit:
unit = _api.MTime.uiUnit()
times = _api.MTimeArray()
for frame in time: times.append(_api.MTime(frame,unit))
keys = _api.MDoubleArray()
for value in values: keys.append(value)
return self.__apimfn__().addKeys( times, keys,
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent'+tangentInType.capitalize()),
_factories.apiClassInfo['MFnAnimCurve']['enums']['TangentType']['values'].getIndex('kTangent'+tangentOutType.capitalize()))
class GeometryFilter(DependNode): pass
class SkinCluster(GeometryFilter):
__metaclass__ = _factories.MetaMayaNodeWrapper
def getWeights(self, geometry, influenceIndex=None):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance( geometry, Transform ):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject( geometry.__apimdagpath__() )
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
if influenceIndex is not None:
weights = _api.MDoubleArray()
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, influenceIndex, weights )
return iter(weights)
else:
weights = _api.MDoubleArray()
index = _api.SafeApiPtr('uint')
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, weights, index() )
index = index.get()
args = [iter(weights)] * index
return itertools.izip(*args)
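    # Note on the return value of getWeights: with an influenceIndex it yields one
    # weight per component for that influence; without one, the
    # izip(*[iter(weights)] * numInfluences) idiom above chunks the flat weight
    # array into one tuple per component, holding that component's weight for
    # every influence object of this skinCluster.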
def setWeights(self, geometry, influnces, weights, normalize=True):
if not isinstance(geometry, general.PyNode):
geometry = general.PyNode(geometry)
if isinstance( geometry, Transform ):
try:
geometry = geometry.getShape()
except:
raise TypeError, "%s is a transform with no shape" % geometry
if isinstance(geometry, GeometryShape):
components = _api.toComponentMObject( geometry.__apimdagpath__() )
elif isinstance(geometry, general.Component):
components = geometry.__apiobject__()
else:
raise TypeError
if not isinstance(influnces,_api.MIntArray):
api_influnces = _api.MIntArray()
for influnce in influnces:
api_influnces.append(influnce)
influnces = api_influnces
if not isinstance(weights,_api.MDoubleArray):
api_weights = _api.MDoubleArray()
for weight in weights:
api_weights.append(weight)
weights = api_weights
old_weights = _api.MDoubleArray()
su = _api.MScriptUtil()
su.createFromInt(0)
index = su.asUintPtr()
self.__apimfn__().getWeights( geometry.__apimdagpath__(), components, old_weights, index )
return self.__apimfn__().setWeights( geometry.__apimdagpath__(), components, influnces, weights, normalize, old_weights )
@_factories.addApiDocs( _api.MFnSkinCluster, 'influenceObjects' )
def influenceObjects(self):
return self._influenceObjects()[1]
def numInfluenceObjects(self):
return self._influenceObjects()[0]
# TODO: if nucleus/symmetryConstraint bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of Nucleus's parent to DependNode
# - remove 2 checks in allapi.toApiObject for objects which can have an MDagPath
# but can't use MFnDagNode
if _apicache.NUCLEUS_MFNDAG_BUG:
# nucleus has a weird bug where, even though it inherits from transform, and
# can be parented in the dag, etc, you can't create an MFnTransform or
    # MFnDagNode for it... therefore, hardcode its PyNode to inherit from
# DependNode
class Nucleus(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
if _apicache.SYMMETRY_CONSTRAINT_MFNDAG_BUG:
class SymmetryConstraint(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
# TODO: if hikHandle bug ever fixed:
# - remove entry in apiCache.ApiCache.API_TO_MFN_OVERRIDES
# - remove hard-code setting of HikHandle's parent to Transform
class HikHandle(Transform):
__metaclass__ = _factories.MetaMayaNodeWrapper
class JointFfd(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
class TransferAttributes(DependNode):
__metaclass__ = _factories.MetaMayaNodeWrapper
_factories.ApiTypeRegister.register( 'MSelectionList', SelectionSet )
def _createPyNodes():
dynModule = _util.LazyLoadModule(__name__, globals())
for mayaType, parents, children in _factories.nodeHierarchy:
if mayaType == 'dependNode':
# This seems like the more 'correct' way of doing it - only node types
# that are currently available have PyNodes created for them - but
# changing it so some PyNodes are no longer available until their
# plugin is loaded may create backwards incompatibility issues...
# if (mayaType == 'dependNode'
# or mayaType not in _factories.mayaTypesToApiTypes):
continue
parentMayaType = parents[0]
#print "superNodeType: ", superNodeType, type(superNodeType)
if parentMayaType is None:
_logger.warning("could not find parent node: %s", mayaType)
continue
#className = _util.capitalize(mayaType)
#if className not in __all__: __all__.append( className )
if _factories.isMayaType(mayaType):
_factories.addPyNode( dynModule, mayaType, parentMayaType )
sys.modules[__name__] = dynModule
# Initialize Pymel classes to API types lookup
#_startTime = time.time()
_createPyNodes()
#_logger.debug( "Initialized Pymel PyNodes types list in %.2f sec" % time.time() - _startTime )
dynModule = sys.modules[__name__]
#def listToMSelection( objs ):
# sel = _api.MSelectionList()
# for obj in objs:
# if isinstance(obj, DependNode):
# sel.add( obj.__apiobject__() )
# elif isinstance(obj, Attribute):
# sel.add( obj.__apiobject__(), True )
# elif isinstance(obj, Component):
# pass
# #sel.add( obj.__apiobject__(), True )
# else:
# raise TypeError
| bsd-3-clause | 638,128,032,992,870,800 | 36.748704 | 531 | 0.560318 | false | 3.778097 | false | false | false |
google-research/google-research | optimizing_interpretability/imagenet/utils.py | 1 | 6411 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utils for training."""
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
def add_noise(input_image, noise, multiple_image_std, size=224):
"""Transformation of a single image by adding noise.
If a random gaussian distribution of noisy is specified (noise='r_normal'),
the standard deviation of the noise added is based upon the dynamic range of
the image weighed by multiple_image_std argument. This appears to work
well empirically, and is the subject of additional research.
Args:
input_image: A single input image, float32 tensor
noise: String that specifies the distribution of noise to add as either a
gaussian distribution (r_normal) or a uniform distribution (r_uniform).
multiple_image_std: Weight to place on the range of input values.
size: size of noise matrix (should match image size)
Returns:
noisy_image: The input with the addition of a noise distribution.
Raises:
ValueError: Raised if the string specifying the noise distribution does
not correspond to the noise implementations.
"""
if noise == 'r_normal':
image_min = tf.reduce_min(input_image)
image_max = tf.reduce_max(input_image)
diff = tf.reduce_mean(tf.subtract(image_max, image_min))
range_ = tf.to_float(tf.multiply(tf.constant([multiple_image_std]), diff))
noise = tf.random_normal(
shape=[size, size, 3], stddev=range_, dtype=tf.float32)
elif noise == 'r_uniform':
percentile_ = tfp.stats.percentile(input_image, q=10.)
noise = tf.random.uniform(
minval=-percentile_,
maxval=percentile_,
shape=[size, size, 3],
dtype=tf.float32)
else:
raise ValueError('Noise type not found:', noise)
noisy_image = tf.add(input_image, noise)
return noisy_image
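# Illustrative sketch, not part of the original module: applying add_noise to a
# single image tensor. The input shape and the r_normal choice are assumptions
# made for the example only.
#
#   image = tf.random.uniform([224, 224, 3], dtype=tf.float32)
#   noisy = add_noise(image, noise='r_normal', multiple_image_std=0.15, size=224)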
def noise_layer(images,
labels,
multiple_image_std=0.15,
size=224,
jitter_multiplier=1,
noise='r_normal'):
"""Add noise to a subset of images in a batch.
Args:
images: The batch of images.
labels: Labels associated with images.
multiple_image_std: Weight to place on the range of input values.
size: The size of the image.
jitter_multiplier: number of images to add noise to.
noise: String that specifies the distribution of noise to add.
Returns:
noisy_images: A set of images (num_images*jitter_multiplier) with injected
noise.
tiled_labels: Associated labels for the noisy images.
"""
images_noise = tf.tile(
images, multiples=tf.constant([jitter_multiplier, 1, 1, 1], shape=[
4,
]))
noisy_images = tf.map_fn(
lambda x: add_noise(x, noise, multiple_image_std, size), images_noise)
noisy_images = tf.concat([images, noisy_images], axis=0)
tiled_labels = tf.tile(labels, tf.constant([jitter_multiplier], shape=[1]))
tiled_labels = tf.concat([labels, tiled_labels], axis=0)
return noisy_images, tiled_labels
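# Shape note: for a batch of N input images, noise_layer returns
# N * (1 + jitter_multiplier) images (the originals concatenated with the noisy
# copies) together with labels tiled to the same length.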
def format_tensors(*dicts):
"""Formats metrics to be callable as tf.summary scalars on tpu's.
Args:
*dicts: A set of metric dictionaries, containing metric name + value tensor.
Returns:
A single formatted dictionary that holds all tensors.
Raises:
ValueError: if any tensor is not a scalar.
"""
merged_summaries = {}
for d in dicts:
for metric_name, value in d.items():
shape = value.shape.as_list()
if not shape:
merged_summaries[metric_name] = tf.expand_dims(value, axis=0)
elif shape == [1]:
merged_summaries[metric_name] = value
else:
raise ValueError(
'Metric {} has value {} that is not reconciliable'.format(
metric_name, value))
return merged_summaries
def host_call_fn(model_dir, **kwargs):
"""creates training summaries when using TPU.
Args:
model_dir: String indicating the output_dir to save summaries in.
**kwargs: Set of metric names and tensor values for all desired summaries.
Returns:
Summary op to be passed to the host_call arg of the estimator function.
"""
gs = kwargs.pop('global_step')[0]
  with tf.contrib.summary.create_file_writer(model_dir).as_default():
    with tf.contrib.summary.always_record_summaries():
      for name, tensor in kwargs.items():
        tf.contrib.summary.scalar(name, tensor[0], step=gs)
return tf.contrib.summary.all_summary_ops()
def get_lr_schedule(train_steps, num_train_images, train_batch_size):
"""learning rate schedule."""
steps_per_epoch = np.floor(num_train_images / train_batch_size)
train_epochs = train_steps / steps_per_epoch
return [ # (multiplier, epoch to start) tuples
(1.0, np.floor(5 / 90 * train_epochs)),
(0.1, np.floor(30 / 90 * train_epochs)),
(0.01, np.floor(60 / 90 * train_epochs)),
(0.001, np.floor(80 / 90 * train_epochs))
]
def learning_rate_schedule(params, current_epoch, train_batch_size,
num_train_images):
"""Handles linear scaling rule, gradual warmup, and LR decay.
Args:
params: Python dict containing parameters for this run.
current_epoch: `Tensor` for current epoch.
train_batch_size: batch size adjusted for PIE
num_train_images: total number of train images
Returns:
A scaled `Tensor` for current learning rate.
"""
scaled_lr = params['base_learning_rate'] * (train_batch_size / 256.0)
lr_schedule = get_lr_schedule(
train_steps=params['train_steps'],
num_train_images=num_train_images,
train_batch_size=train_batch_size)
decay_rate = (
scaled_lr * lr_schedule[0][0] * current_epoch / lr_schedule[0][1])
for mult, start_epoch in lr_schedule:
decay_rate = tf.where(current_epoch < start_epoch, decay_rate,
scaled_lr * mult)
return decay_rate
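# Illustrative worked example with assumed numbers, not taken from this module:
# with base_learning_rate=0.1 and train_batch_size=512, scaled_lr is 0.2; the
# rate warms up linearly until epoch 5/90 * train_epochs, then steps down to
# 0.1x, 0.01x and 0.001x of scaled_lr at 30/90, 60/90 and 80/90 of training.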
| apache-2.0 | 4,932,138,354,840,355,000 | 33.842391 | 80 | 0.676182 | false | 3.740373 | false | false | false |
lorddex/linux_tools | virtual_testbeds/add_hosts.py | 1 | 2360 | #!/usr/bin/python
# script that adds a VM who requests an IP address using the dhcpd to local hosts file
import sys
import subprocess
import string
import time
debug_file="/var/log/add_hosts.log"
def debug(message):
message = time.strftime("%d %b %Y %H:%M:%S") + " " + message
print message
fd = open(debug_file, "a")
fd.write(message + "\n")
fd.close()
text=""
for arg in sys.argv:
text = text +" "+arg
debug(text)
action=sys.argv[1]
ip=sys.argv[3]
mac=sys.argv[2]
hosts="/etc/hosts"
# if del action is called exit from this script
if action == "del":
# fd=open(hosts, "r")
# hosts_lines=fd.readlines()
# fd.close()
# fd=open(hosts, "w")
# for line in hosts_lines:
# if ip not in line:
# fd.write(line)
# debug( "Ok, %s deleted from %s file" % (name, hosts))
sys.exit(0)
# add address to local hosts file
#command = ["/bin/ps", "-eo", "command"]
#process = subprocess.Popen(command, stdout=subprocess.PIPE, bufsize=9192)
if len(sys.argv) == 5:
name = sys.argv[4]
debug("host name from parameters: "+name)
else:
command = "ps axo pid,command | grep /usr/bin/kvm"
process = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
found = None
for line in process.stdout.readlines():
pid=line.split(" ")
pid = pid[0]
fd_c = open("/proc/"+pid+"/cmdline", "r")
lines=fd_c.readlines()
fd_c.close()
if len(lines)>0:
line=lines[0]
line=string.replace(line, "-", " -")
line=string.replace(line, "\x00", " ")
else:
continue
if mac in line and "add_host" not in line:
found = line
break
if found is None:
debug("Ops, no VM with %s found" % mac)
sys.exit(1)
parms = found.split(" -")[1:]
name=False
for par in parms:
if par.strip().startswith("name"):
name = par.strip().split(" ")[1]
if name is False:
debug("Ops, VM name not found")
sys.exit(2)
fd=open(hosts, "r")
hosts_lines=fd.readlines()
fd.close()
already=False
for line in hosts_lines:
if name in line:
already=line
break
change=False
if already is not False:
if ip in line:
debug("Ok, VM already in hosts file")
sys.exit(0)
else:
change=True
if change is False:
fd=open(hosts, "a")
fd.write(ip + "\t\t" + name +"\n")
else:
fd=open(hosts, "w")
for line in hosts_lines:
if name in line:
line = ip + "\t\t" + name + "\n"
fd.write(line)
fd.close()
debug( "Ok, %s added to %s file" % (name, hosts))
| mit | 8,233,084,119,662,574,000 | 20.454545 | 86 | 0.642373 | false | 2.610619 | false | false | false |
jromang/retina | gui/workspace.py | 1 | 1423 | # Copyright (C) 2013-2016 Jean-Francois Romang ([email protected])
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#TODO : inherit from 'workspaceobject' - abstract hide/show method
# ou "add"/"move" pethod avec des qwidgets
max_workspaces=4
workspace_objects= set()
current = 0
buttons = []
def add(qwidget, workspace_id=None):
workspace_objects.add(qwidget)
qwidget.workspace=current if workspace_id is None else workspace_id
#print("qwidget workspace:"+str(qwidget.workspace))
def switch(workspace_id):
#print("switch to:"+str(workspace_id))
global current
current=workspace_id
for widget in workspace_objects:
if widget.workspace==workspace_id: widget.show()
else: widget.hide()
for button in buttons:
button.setChecked(False)
buttons[workspace_id].setChecked(True)
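# Illustrative usage sketch; the widget and the `buttons` list are assumed to be
# created elsewhere by the caller:
#
# add(my_widget, workspace_id=1)
# switch(1) # shows widgets tagged with workspace 1 and hides the others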
| gpl-3.0 | -2,353,139,725,358,365,700 | 33.707317 | 71 | 0.735067 | false | 3.774536 | false | false | false |
Hao-Liu/avocado | selftests/unit/test_xunit.py | 1 | 1945 | import argparse
import unittest
import os
import sys
from xml.dom import minidom
import tempfile
import shutil
from avocado import Test
from avocado.core.plugins import xunit
from avocado.core import job
class ParseXMLError(Exception):
pass
class _Stream(object):
def start_file_logging(self, param1, param2):
pass
def stop_file_logging(self):
pass
def set_tests_info(self, info):
pass
def notify(self, event, msg):
pass
def add_test(self, state):
pass
def set_test_status(self, status, state):
pass
class xUnitSucceedTest(unittest.TestCase):
def setUp(self):
class SimpleTest(Test):
def test(self):
pass
self.tmpfile = tempfile.mkstemp()
self.tmpdir = tempfile.mkdtemp()
args = argparse.Namespace()
args.xunit_output = self.tmpfile[1]
self.test_result = xunit.xUnitTestResult(stream=_Stream(), args=args)
self.test_result.start_tests()
self.test1 = SimpleTest(job=job.Job(), base_logdir=self.tmpdir)
self.test1.status = 'PASS'
self.test1.time_elapsed = 1.23
def tearDown(self):
os.close(self.tmpfile[0])
os.remove(self.tmpfile[1])
shutil.rmtree(self.tmpdir)
def testAddSuccess(self):
self.test_result.start_test(self.test1)
self.test_result.end_test(self.test1.get_state())
self.test_result.end_tests()
self.assertTrue(self.test_result.xml)
with open(self.test_result.output) as fp:
xml = fp.read()
try:
dom = minidom.parseString(xml)
except Exception, details:
raise ParseXMLError("Error parsing XML: '%s'.\nXML Contents:\n%s" % (details, xml))
self.assertTrue(dom)
els = dom.getElementsByTagName('testcase')
self.assertEqual(len(els), 1)
if __name__ == '__main__':
unittest.main()
| gpl-2.0 | -2,193,562,886,374,426,000 | 23.3125 | 95 | 0.62108 | false | 3.690702 | true | false | false |
frac/celery | celery/signals.py | 1 | 6128 | """
==============
celery.signals
==============
Signals allows decoupled applications to receive notifications when
certain actions occur elsewhere in the application.
:copyright: (c) 2009 - 2011 by Ask Solem.
:license: BSD, see LICENSE for more details.
.. contents::
:local:
.. _signal-basics:
Basics
======
Several kinds of events trigger signals, you can connect to these signals
to perform actions as they trigger.
Example connecting to the :signal:`task_sent` signal:
.. code-block:: python
from celery.signals import task_sent
def task_sent_handler(sender=None, task_id=None, task=None, args=None,
kwargs=None, **kwds):
print("Got signal task_sent for task id %s" % (task_id, ))
task_sent.connect(task_sent_handler)
Some signals also have a sender which you can filter by. For example the
:signal:`task_sent` signal uses the task name as a sender, so you can
connect your handler to be called only when tasks with name `"tasks.add"`
has been sent by providing the `sender` argument to
:class:`~celery.utils.dispatch.signal.Signal.connect`:
.. code-block:: python
task_sent.connect(task_sent_handler, sender="tasks.add")
.. _signal-ref:
Signals
=======
Task Signals
------------
.. signal:: task_sent
task_sent
~~~~~~~~~
Dispatched when a task has been sent to the broker.
Note that this is executed in the client process, the one sending
the task, not in the worker.
Sender is the name of the task being sent.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
the tasks positional arguments.
* kwargs
The tasks keyword arguments.
* eta
The time to execute the task.
* taskset
Id of the taskset this task is part of (if any).
.. signal:: task_prerun
task_prerun
~~~~~~~~~~~
Dispatched before a task is executed.
Sender is the task class being executed.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
the tasks positional arguments.
* kwargs
The tasks keyword arguments.
.. signal:: task_postrun
task_postrun
~~~~~~~~~~~~
Dispatched after a task has been executed.
Sender is the task class executed.
Provides arguments:
* task_id
Id of the task to be executed.
* task
The task being executed.
* args
The tasks positional arguments.
* kwargs
The tasks keyword arguments.
* retval
The return value of the task.
.. signal:: task_failure
task_failure
~~~~~~~~~~~~
Dispatched when a task fails.
Sender is the task class executed.
Provides arguments:
* task_id
Id of the task.
* exception
Exception instance raised.
* args
Positional arguments the task was called with.
* kwargs
Keyword arguments the task was called with.
* traceback
Stack trace object.
* einfo
The :class:`celery.datastructures.ExceptionInfo` instance.
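Example handler (illustrative; the argument names follow the list above):
.. code-block:: python
    from celery.signals import task_failure
    def task_failure_handler(sender=None, task_id=None, exception=None,
                             args=None, kwargs=None, traceback=None,
                             einfo=None, **kwds):
        print("Task %s raised exception: %r" % (task_id, exception))
    task_failure.connect(task_failure_handler)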
Worker Signals
--------------
.. signal:: worker_init
worker_init
~~~~~~~~~~~
Dispatched before the worker is started.
.. signal:: worker_ready
worker_ready
~~~~~~~~~~~~
Dispatched when the worker is ready to accept work.
.. signal:: worker_process_init
worker_process_init
~~~~~~~~~~~~~~~~~~~
Dispatched by each new pool worker process when it starts.
.. signal:: worker_shutdown
worker_shutdown
~~~~~~~~~~~~~~~
Dispatched when the worker is about to shut down.
Celerybeat Signals
------------------
.. signal:: beat_init
beat_init
~~~~~~~~~
Dispatched when celerybeat starts (either standalone or embedded).
Sender is the :class:`celery.beat.Service` instance.
.. signal:: beat_embedded_init
beat_embedded_init
~~~~~~~~~~~~~~~~~~
Dispatched in addition to the :signal:`beat_init` signal when celerybeat is
started as an embedded process. Sender is the
:class:`celery.beat.Service` instance.
Eventlet Signals
----------------
.. signal:: eventlet_pool_started
eventlet_pool_started
~~~~~~~~~~~~~~~~~~~~~
Sent when the eventlet pool has been started.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_preshutdown
eventlet_pool_preshutdown
~~~~~~~~~~~~~~~~~~~~~~~~~
Sent when the worker shutdown, just before the eventlet pool
is requested to wait for remaining workers.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_postshutdown
eventlet_pool_postshutdown
~~~~~~~~~~~~~~~~~~~~~~~~~~
Sent when the pool has been joined and the worker is ready to shutdown.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
.. signal:: eventlet_pool_apply
eventlet_pool_apply
~~~~~~~~~~~~~~~~~~~
Sent whenever a task is applied to the pool.
Sender is the :class:`celery.concurrency.evlet.TaskPool` instance.
Provides arguments:
* target
The target function.
* args
Positional arguments.
* kwargs
Keyword arguments.
"""
from celery.utils.dispatch import Signal
task_sent = Signal(providing_args=["task_id", "task",
"args", "kwargs",
"eta", "taskset"])
task_prerun = Signal(providing_args=["task_id", "task",
"args", "kwargs"])
task_postrun = Signal(providing_args=["task_id", "task",
"args", "kwargs", "retval"])
task_failure = Signal(providing_args=["task_id", "exception",
"args", "kwargs", "traceback",
"einfo"])
worker_init = Signal(providing_args=[])
worker_process_init = Signal(providing_args=[])
worker_ready = Signal(providing_args=[])
worker_shutdown = Signal(providing_args=[])
setup_logging = Signal(providing_args=["loglevel", "logfile",
"format", "colorize"])
beat_init = Signal(providing_args=[])
beat_embedded_init = Signal(providing_args=[])
eventlet_pool_started = Signal(providing_args=[])
eventlet_pool_preshutdown = Signal(providing_args=[])
eventlet_pool_postshutdown = Signal(providing_args=[])
eventlet_pool_apply = Signal(providing_args=["target", "args", "kwargs"])
| bsd-3-clause | -1,422,268,699,349,315,800 | 19.426667 | 75 | 0.6578 | false | 3.771077 | false | false | false |
fullmetalfelix/ML-CSC-tutorial | data/descriptor_codes/charge.mbtr.py | 1 | 2490 | from __future__ import print_function
from describe.descriptors import LMBTR
from describe.core import System
from describe.data.element_data import numbers_to_symbols
import numpy as np
from scipy.sparse import lil_matrix, save_npz
from read_binary import *
data = read_b('../binary/database-mulliken-ccsd-spd.bin')
decay_factor = 0.5
mbtr = LMBTR(
atom_index = 1,
atomic_numbers=[1, 6, 7, 8, 9],
k=[1, 2, 3],
periodic=False,
grid={
"k1": {
"min": 0,
"max": 10,
"sigma": 0.1,
"n": 11,
},
"k2": {
"min": 1/7,
"max": 1.5,
"sigma": 0.01,
"n": 50,
},
"k3": {
"min": -1.0,
"max": 1.0,
"sigma": 0.05,
"n": 50,
}
},
weighting={
"k2": {
"function": lambda x: np.exp(-decay_factor*x),
"threshold": 1e-3
},
"k3": {
"function": lambda x: np.exp(-decay_factor*x),
"threshold": 1e-3
},
},
flatten=True)
mbtr_nfeat = mbtr.get_number_of_features()
elements_list = [1, 6, 7, 8, 9]
chg = np.empty(len(elements_list), dtype='object')
max_chg_count = [10000]*4+[3314]
for i, j in enumerate(max_chg_count):
chg[i] = lil_matrix((j, mbtr_nfeat))
chg_count = np.zeros(len(elements_list), dtype='int')
for atom_ind, atoms in enumerate(data):
atoms_sys = System(positions=atoms.coords, numbers=atoms.Zs)
elements_req = np.array(elements_list)[chg_count != max_chg_count].tolist()
print('\r {}'.format(chg_count), end = '')
for element in elements_req:
element_ind = elements_list.index(element)
if chg_count[element_ind] != max_chg_count[element_ind] and element in atoms.Zs:
element_indx_atoms = np.where(atoms.Zs == element)[0]
len_added = min(element_indx_atoms.shape[0], max_chg_count[element_ind]-chg_count[element_ind])
for i in range(chg_count[element_ind], chg_count[element_ind]+len_added):
mbtr.atom_index = element_indx_atoms[i - chg_count[element_ind]]
chg[element_ind][i] = mbtr.create(atoms_sys)
chg_count[element_ind] += len_added
if np.sum(chg_count) == sum(max_chg_count):
break
for i, j in enumerate(elements_list):
save_npz('../charge.{}.input.mbtr'.format(j), chg[i].tocsr())
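# Note (not part of the original script): scipy appends the .npz suffix, so the
# matrices saved above can be reloaded later with, e.g.,
# scipy.sparse.load_npz('../charge.1.input.mbtr.npz').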
| gpl-3.0 | 3,026,851,773,269,828,600 | 29.740741 | 107 | 0.538554 | false | 3.180077 | false | false | false |
arnau-prat/My-Raspersonal-assistant | assets/brain.py | 1 | 1572 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import cv2
import threading
import time
import subprocess
import os
import signal
from datetime import datetime
from modules import tracker, calendar, alarm, wolfram, music
class Brain:
def __init__(self):
self.tracker = tracker.Tracker()
self.alarm = alarm.Alarm()
self.calendar = calendar.Calendar()
self.wolfram = wolfram.Wolfram()
self.music = music.Music()
def think(self, text):
if ("timer" in text) | ("alarm" in text):
response = self.alarm.think(text)
elif ("time" in text):
response = datetime.now().strftime("It's %I:%M%p")
elif ("day" in text) | ("date" in text):
response = datetime.now().strftime("%A %d of %B")
elif ("music" in text) | ("play" in text):
response = self.music.play()
elif ("take" in text) | ("photo" in text):
response = "taking picture"
image = cv2.imread("/home/pi/Desktop/im.jpg")
image = cv2.resize(image,(800,600))
cv2.imwrite("/hoe/pi/Desktop/def.jpg",image)
time.sleep(1)
os.system ('mpg321 assets/camera_shutter.mp3')
elif ("wake" in text) | ("up" in text):
self.tracker.start()
response = "I'm waking up sir"
elif ("down" in text) | ("sleep" in text):
self.tracker.stop()
response = "I'm going to sleep now"
elif "calendar" in text:
response = self.calendar.think(text)
else:
response = self.wolfram.think(text)
return response
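# Illustrative usage sketch, assuming the modules imported above are available:
#
# brain = Brain()
# print(brain.think("what time is it")) # e.g. "It's 09:41AM"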
| mit | -7,316,954,540,866,034,000 | 25.2 | 62 | 0.583333 | false | 3.454945 | false | false | false |
tomadasocial/tomada-social | tomadasocial/settings.py | 1 | 3965 | """
Django settings for tomadasocial project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '!qy$6$mh%b2mp$)km*!^uaf-v%givqnzzndo0b)y)qo93p973_'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, '..', 'media')
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = '/media/'
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(BASE_DIR, '..', 'static')
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework_mongoengine',
'account',
'evento',
'conta',
'event',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'tomadasocial.urls'
WSGI_APPLICATION = 'tomadasocial.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy'
}
}
AUTHENTICATION_BACKENDS = (
'mongoengine.django.auth.MongoEngineBackend',
)
SESSION_ENGINE = 'mongoengine.django.sessions'
SESSION_SERIALIZER = 'mongoengine.django.sessions.BSONSerializer'
# from mongoengine import connect
# connect('records', username='recordsUserAdmin', password='password')
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
from mongoengine import connect
# connect('admin', username='admin', password='senha')
#connect("cc_bQWsNhAJvOH", host='mongodb://bQWsNhAJvOHi:[email protected]:31904/cc_bQWsNhAJvOHi')
connect("heroku_app33947277", host='mongodb://tomadasocial:[email protected]:61681/heroku_app33947277')
#connect("admin", host='mongodb://admin:[email protected]:27017/admin') | gpl-2.0 | -515,293,318,295,498,900 | 28.819549 | 123 | 0.735183 | false | 3.25 | false | false | false |
tmaiwald/OSIM | OSIM/Optimizations/OptimizationComponents/Optimizable.py | 1 | 1217 |
class Optimizable(object):
def __init__(self,comp_names_list,paramname,valfrom,valto,**kwargs):
self.names = comp_names_list
self.paramname = paramname
self.minStep = 2 #default
self.vFrom = valfrom
self.vTo = valto
self.val = 0
for name, value in kwargs.items():
if name == 'minSteps':
self.minStep = value
def setValue(self, v):
self.val = v
def getRangeBegin(self):
return self.vFrom
def getRangeEnd(self):
return self.vTo
def getValue(self):
return self.val
def getOptimizableComponentNames(self):
return self.names
def toString(self):
stri = ""
for n in self.names:
stri = stri+" "+n
return (stri+" at %s"%(str(self.val)))
def getParamName(self):
return self.paramname
@staticmethod
def getSetableList(olist):
setableList = list()
for o in olist:
for n in o.getOptimizableComponentNames():
"""compname, paramname, paramval"""
n = [n, o.getParamName(), o.getValue()]
setableList.append(n)
return setableList
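# Illustrative usage sketch; the component and parameter names below are made up:
#
# opt = Optimizable(['R1', 'R2'], 'resistance', 1e3, 1e5, minSteps=4)
# opt.setValue(2200.0)
# opt.toString() # " R1 R2 at 2200.0"
# Optimizable.getSetableList([opt]) # [['R1', 'resistance', 2200.0], ['R2', 'resistance', 2200.0]]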
| bsd-2-clause | 6,869,095,868,445,226,000 | 22.862745 | 72 | 0.557108 | false | 3.888179 | false | false | false |
pymedusa/SickRage | medusa/init/logconfig.py | 1 | 2929 | # coding=utf-8
"""Monkey-patch logger functions to accept enhanced format styles."""
from __future__ import unicode_literals
import logging
from builtins import object
try:
from inspect import getfullargspec
except ImportError:
from inspect import getargspec as getfullargspec
from six import text_type
class StyleAdapter(logging.LoggerAdapter):
"""Logger Adapter with new string format style."""
adapter_members = {attr: attr for attr in dir(logging.LoggerAdapter) if not callable(attr) and
not attr.startswith('__')}
adapter_members.update({'warn': 'warning', 'fatal': 'critical'})
reserved_keywords = getfullargspec(logging.Logger._log).args[1:]
def __init__(self, target_logger, extra=None):
"""Constructor.
:param target_logger:
:type target_logger: logging.Logger
:param extra:
:type extra: dict
"""
super(StyleAdapter, self).__init__(target_logger, extra)
def __getattr__(self, name):
"""Wrap to the actual logger.
:param name:
:type name: str
:return:
"""
if name not in self.adapter_members:
return getattr(self.logger, name)
return getattr(self, self.adapter_members[name])
def __setattr__(self, key, value):
"""Wrap to the actual logger.
:param key:
:type key: str
:param value:
"""
self.__dict__[key] = value
def process(self, msg, kwargs):
"""Enhance default process to use BraceMessage and remove unsupported keyword args for the actual logger method.
:param msg:
:param kwargs:
:return:
"""
reserved = {k: kwargs[k] for k in self.reserved_keywords if k in kwargs}
kwargs = {k: kwargs[k] for k in kwargs if k not in self.reserved_keywords}
return BraceMessage(msg, (), kwargs), reserved
class BraceMessage(object):
"""Log Message wrapper that applies new string format style."""
def __init__(self, fmt, args, kwargs):
"""Constructor.
:param fmt:
:type fmt: logging.Formatter
:param args:
:param kwargs:
"""
self.fmt = fmt
self.args = args
self.kwargs = kwargs
def __str__(self):
"""Represent a string.
:return:
:rtype: str
"""
result = text_type(self.fmt)
return result.format(*self.args, **self.kwargs) if self.args or self.kwargs else result
def initialize():
"""Replace standard getLogger with our enhanced one."""
def enhanced_get_logger(name=None):
"""Enhanced logging.getLogger function.
:param name:
:return:
"""
return StyleAdapter(standard_logger(name))
logging.getLogger = enhanced_get_logger
# Keeps the standard logging.getLogger to be used by StyleAdapter
standard_logger = logging.getLogger
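# Illustrative usage sketch (logger name and message are made up): once
# initialize() has replaced logging.getLogger, loggers accept brace-style
# formatting.
#
# initialize()
# log = logging.getLogger('medusa.example')
# log.info('Found {count} results for {show}', count=10, show='Example Show')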
| gpl-3.0 | 4,170,883,745,947,611,000 | 26.632075 | 120 | 0.613861 | false | 4.251089 | false | false | false |
zagl/led-tool | ui_main.py | 1 | 27811 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'ui_main.ui'
#
# Created: Sun Apr 3 16:50:44 2016
# by: PyQt4 UI code generator 4.11.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_MainWindow(object):
def setupUi(self, MainWindow):
MainWindow.setObjectName(_fromUtf8("MainWindow"))
MainWindow.resize(687, 562)
self.centralwidget = QtGui.QWidget(MainWindow)
self.centralwidget.setObjectName(_fromUtf8("centralwidget"))
self.horizontalLayout = QtGui.QHBoxLayout(self.centralwidget)
self.horizontalLayout.setObjectName(_fromUtf8("horizontalLayout"))
self.main_stacked_widget = QtGui.QStackedWidget(self.centralwidget)
self.main_stacked_widget.setObjectName(_fromUtf8("main_stacked_widget"))
self.main_page = QtGui.QWidget()
self.main_page.setMinimumSize(QtCore.QSize(669, 544))
self.main_page.setObjectName(_fromUtf8("main_page"))
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.main_page)
self.horizontalLayout_2.setSpacing(6)
self.horizontalLayout_2.setObjectName(_fromUtf8("horizontalLayout_2"))
self.verticalLayout_3 = QtGui.QVBoxLayout()
self.verticalLayout_3.setContentsMargins(-1, -1, 0, -1)
self.verticalLayout_3.setObjectName(_fromUtf8("verticalLayout_3"))
self.led_filter = QtGui.QLineEdit(self.main_page)
self.led_filter.setObjectName(_fromUtf8("led_filter"))
self.verticalLayout_3.addWidget(self.led_filter)
self.led_list_view = QtGui.QListView(self.main_page)
self.led_list_view.setIconSize(QtCore.QSize(0, 0))
self.led_list_view.setGridSize(QtCore.QSize(0, 0))
self.led_list_view.setObjectName(_fromUtf8("led_list_view"))
self.verticalLayout_3.addWidget(self.led_list_view)
self.horizontalLayout_5 = QtGui.QHBoxLayout()
self.horizontalLayout_5.setSpacing(0)
self.horizontalLayout_5.setObjectName(_fromUtf8("horizontalLayout_5"))
self.add_led_button = QtGui.QToolButton(self.main_page)
self.add_led_button.setMinimumSize(QtCore.QSize(32, 32))
self.add_led_button.setAutoRaise(False)
self.add_led_button.setObjectName(_fromUtf8("add_led_button"))
self.horizontalLayout_5.addWidget(self.add_led_button)
self.remove_led_button = QtGui.QToolButton(self.main_page)
self.remove_led_button.setMinimumSize(QtCore.QSize(32, 32))
self.remove_led_button.setAutoRaise(False)
self.remove_led_button.setObjectName(_fromUtf8("remove_led_button"))
self.horizontalLayout_5.addWidget(self.remove_led_button)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_5.addItem(spacerItem)
self.verticalLayout_3.addLayout(self.horizontalLayout_5)
self.horizontalLayout_2.addLayout(self.verticalLayout_3)
self.gridLayout = QtGui.QGridLayout()
self.gridLayout.setSizeConstraint(QtGui.QLayout.SetDefaultConstraint)
self.gridLayout.setContentsMargins(14, -1, -1, -1)
self.gridLayout.setObjectName(_fromUtf8("gridLayout"))
self.voltage_label = QtGui.QLabel(self.main_page)
self.voltage_label.setMinimumSize(QtCore.QSize(0, 0))
self.voltage_label.setObjectName(_fromUtf8("voltage_label"))
self.gridLayout.addWidget(self.voltage_label, 6, 1, 1, 1)
self.label = QtGui.QLabel(self.main_page)
self.label.setMinimumSize(QtCore.QSize(0, 30))
self.label.setObjectName(_fromUtf8("label"))
self.gridLayout.addWidget(self.label, 2, 0, 1, 1)
self.current_spinbox = QtGui.QDoubleSpinBox(self.main_page)
self.current_spinbox.setMaximum(9999999.0)
self.current_spinbox.setObjectName(_fromUtf8("current_spinbox"))
self.gridLayout.addWidget(self.current_spinbox, 2, 1, 1, 1)
self.label_8 = QtGui.QLabel(self.main_page)
self.label_8.setMinimumSize(QtCore.QSize(0, 30))
self.label_8.setObjectName(_fromUtf8("label_8"))
self.gridLayout.addWidget(self.label_8, 8, 0, 1, 1)
self.label_10 = QtGui.QLabel(self.main_page)
self.label_10.setMinimumSize(QtCore.QSize(0, 30))
self.label_10.setObjectName(_fromUtf8("label_10"))
self.gridLayout.addWidget(self.label_10, 11, 0, 1, 1)
self.input_power_label = QtGui.QLabel(self.main_page)
self.input_power_label.setObjectName(_fromUtf8("input_power_label"))
self.gridLayout.addWidget(self.input_power_label, 8, 1, 1, 1)
self.voltage_group_combobox = QtGui.QComboBox(self.main_page)
self.voltage_group_combobox.setMinimumSize(QtCore.QSize(0, 0))
self.voltage_group_combobox.setObjectName(_fromUtf8("voltage_group_combobox"))
self.gridLayout.addWidget(self.voltage_group_combobox, 5, 1, 1, 1)
self.label_9 = QtGui.QLabel(self.main_page)
self.label_9.setMinimumSize(QtCore.QSize(0, 30))
self.label_9.setObjectName(_fromUtf8("label_9"))
self.gridLayout.addWidget(self.label_9, 10, 0, 1, 1)
self.thermal_power_label = QtGui.QLabel(self.main_page)
self.thermal_power_label.setObjectName(_fromUtf8("thermal_power_label"))
self.gridLayout.addWidget(self.thermal_power_label, 10, 1, 1, 1)
self.label_5 = QtGui.QLabel(self.main_page)
self.label_5.setMinimumSize(QtCore.QSize(0, 30))
self.label_5.setObjectName(_fromUtf8("label_5"))
self.gridLayout.addWidget(self.label_5, 7, 0, 1, 1)
self.label_4 = QtGui.QLabel(self.main_page)
self.label_4.setMinimumSize(QtCore.QSize(0, 30))
self.label_4.setObjectName(_fromUtf8("label_4"))
self.gridLayout.addWidget(self.label_4, 5, 0, 1, 1)
self.radiant_flux_label = QtGui.QLabel(self.main_page)
self.radiant_flux_label.setObjectName(_fromUtf8("radiant_flux_label"))
self.gridLayout.addWidget(self.radiant_flux_label, 9, 1, 1, 1)
self.horizontalLayout_6 = QtGui.QHBoxLayout()
self.horizontalLayout_6.setObjectName(_fromUtf8("horizontalLayout_6"))
self.led_header_label = QtGui.QLabel(self.main_page)
self.led_header_label.setMinimumSize(QtCore.QSize(0, 32))
self.led_header_label.setObjectName(_fromUtf8("led_header_label"))
self.horizontalLayout_6.addWidget(self.led_header_label)
spacerItem1 = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_6.addItem(spacerItem1)
self.edit_led_button = QtGui.QPushButton(self.main_page)
self.edit_led_button.setMaximumSize(QtCore.QSize(75, 16777215))
self.edit_led_button.setFlat(False)
self.edit_led_button.setObjectName(_fromUtf8("edit_led_button"))
self.horizontalLayout_6.addWidget(self.edit_led_button)
self.gridLayout.addLayout(self.horizontalLayout_6, 1, 0, 1, 2)
self.label_2 = QtGui.QLabel(self.main_page)
self.label_2.setMinimumSize(QtCore.QSize(0, 30))
self.label_2.setObjectName(_fromUtf8("label_2"))
self.gridLayout.addWidget(self.label_2, 3, 0, 1, 1)
self.luminous_flux_label = QtGui.QLabel(self.main_page)
self.luminous_flux_label.setTextInteractionFlags(QtCore.Qt.LinksAccessibleByMouse)
self.luminous_flux_label.setObjectName(_fromUtf8("luminous_flux_label"))
self.gridLayout.addWidget(self.luminous_flux_label, 7, 1, 1, 1)
spacerItem2 = QtGui.QSpacerItem(20, 40, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout.addItem(spacerItem2, 12, 0, 1, 1)
self.brightness_group_combobox = QtGui.QComboBox(self.main_page)
self.brightness_group_combobox.setObjectName(_fromUtf8("brightness_group_combobox"))
self.gridLayout.addWidget(self.brightness_group_combobox, 4, 1, 1, 1)
self.label_7 = QtGui.QLabel(self.main_page)
self.label_7.setMinimumSize(QtCore.QSize(0, 30))
self.label_7.setObjectName(_fromUtf8("label_7"))
self.gridLayout.addWidget(self.label_7, 9, 0, 1, 1)
self.label_3 = QtGui.QLabel(self.main_page)
self.label_3.setMinimumSize(QtCore.QSize(0, 30))
self.label_3.setObjectName(_fromUtf8("label_3"))
self.gridLayout.addWidget(self.label_3, 4, 0, 1, 1)
self.temperature_spinbox = QtGui.QDoubleSpinBox(self.main_page)
self.temperature_spinbox.setMaximum(9999999.0)
self.temperature_spinbox.setObjectName(_fromUtf8("temperature_spinbox"))
self.gridLayout.addWidget(self.temperature_spinbox, 3, 1, 1, 1)
self.label_6 = QtGui.QLabel(self.main_page)
self.label_6.setMinimumSize(QtCore.QSize(0, 30))
self.label_6.setObjectName(_fromUtf8("label_6"))
self.gridLayout.addWidget(self.label_6, 6, 0, 1, 1)
self.radiant_efficiency_label = QtGui.QLabel(self.main_page)
self.radiant_efficiency_label.setObjectName(_fromUtf8("radiant_efficiency_label"))
self.gridLayout.addWidget(self.radiant_efficiency_label, 11, 1, 1, 1)
self.gridLayout.setColumnStretch(1, 1)
self.horizontalLayout_2.addLayout(self.gridLayout)
self.horizontalLayout_2.setStretch(1, 1)
self.main_stacked_widget.addWidget(self.main_page)
self.editor_page = QtGui.QWidget()
self.editor_page.setObjectName(_fromUtf8("editor_page"))
self.verticalLayout_2 = QtGui.QVBoxLayout(self.editor_page)
self.verticalLayout_2.setObjectName(_fromUtf8("verticalLayout_2"))
self.horizontalLayout_4 = QtGui.QHBoxLayout()
self.horizontalLayout_4.setSpacing(20)
self.horizontalLayout_4.setSizeConstraint(QtGui.QLayout.SetMinimumSize)
self.horizontalLayout_4.setObjectName(_fromUtf8("horizontalLayout_4"))
self.led_editor_navigator_list = QtGui.QListWidget(self.editor_page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.MinimumExpanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.led_editor_navigator_list.sizePolicy().hasHeightForWidth())
self.led_editor_navigator_list.setSizePolicy(sizePolicy)
self.led_editor_navigator_list.setMinimumSize(QtCore.QSize(0, 0))
self.led_editor_navigator_list.setObjectName(_fromUtf8("led_editor_navigator_list"))
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
item = QtGui.QListWidgetItem()
self.led_editor_navigator_list.addItem(item)
self.horizontalLayout_4.addWidget(self.led_editor_navigator_list)
self.editor_stacked_widget = QtGui.QStackedWidget(self.editor_page)
self.editor_stacked_widget.setObjectName(_fromUtf8("editor_stacked_widget"))
self.parameter_page = QtGui.QWidget()
self.parameter_page.setObjectName(_fromUtf8("parameter_page"))
self.gridLayout_3 = QtGui.QGridLayout(self.parameter_page)
self.gridLayout_3.setMargin(0)
self.gridLayout_3.setVerticalSpacing(6)
self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3"))
self.manufacturer_combo = QtGui.QComboBox(self.parameter_page)
self.manufacturer_combo.setEditable(True)
self.manufacturer_combo.setObjectName(_fromUtf8("manufacturer_combo"))
self.manufacturer_combo.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.manufacturer_combo, 1, 2, 1, 1)
spacerItem3 = QtGui.QSpacerItem(0, 0, QtGui.QSizePolicy.Minimum, QtGui.QSizePolicy.Expanding)
self.gridLayout_3.addItem(spacerItem3, 7, 0, 1, 1)
self.label_20 = QtGui.QLabel(self.parameter_page)
self.label_20.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_20.setObjectName(_fromUtf8("label_20"))
self.gridLayout_3.addWidget(self.label_20, 3, 0, 1, 1)
self.label_18 = QtGui.QLabel(self.parameter_page)
self.label_18.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_18.setObjectName(_fromUtf8("label_18"))
self.gridLayout_3.addWidget(self.label_18, 1, 0, 1, 1)
self.label_19 = QtGui.QLabel(self.parameter_page)
self.label_19.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_19.setObjectName(_fromUtf8("label_19"))
self.gridLayout_3.addWidget(self.label_19, 2, 0, 1, 1)
self.label_21 = QtGui.QLabel(self.parameter_page)
self.label_21.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_21.setObjectName(_fromUtf8("label_21"))
self.gridLayout_3.addWidget(self.label_21, 4, 0, 1, 1)
self.name_edit = QtGui.QLineEdit(self.parameter_page)
self.name_edit.setObjectName(_fromUtf8("name_edit"))
self.gridLayout_3.addWidget(self.name_edit, 0, 2, 1, 1)
self.label_22 = QtGui.QLabel(self.parameter_page)
self.label_22.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_22.setObjectName(_fromUtf8("label_22"))
self.gridLayout_3.addWidget(self.label_22, 5, 0, 1, 1)
self.label_23 = QtGui.QLabel(self.parameter_page)
self.label_23.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_23.setObjectName(_fromUtf8("label_23"))
self.gridLayout_3.addWidget(self.label_23, 6, 0, 1, 1)
self.thermal_resistance_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.thermal_resistance_spin.setMaximum(9999999.0)
self.thermal_resistance_spin.setObjectName(_fromUtf8("thermal_resistance_spin"))
self.gridLayout_3.addWidget(self.thermal_resistance_spin, 5, 2, 1, 1)
self.label_17 = QtGui.QLabel(self.parameter_page)
self.label_17.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.label_17.setObjectName(_fromUtf8("label_17"))
self.gridLayout_3.addWidget(self.label_17, 0, 0, 1, 1)
self.reference_temperature_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.reference_temperature_spin.setMaximum(9999999.0)
self.reference_temperature_spin.setObjectName(_fromUtf8("reference_temperature_spin"))
self.gridLayout_3.addWidget(self.reference_temperature_spin, 6, 2, 1, 1)
self.typical_current_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.typical_current_spin.setMaximum(9999999.0)
self.typical_current_spin.setObjectName(_fromUtf8("typical_current_spin"))
self.gridLayout_3.addWidget(self.typical_current_spin, 4, 2, 1, 1)
self.typical_voltage_spin = QtGui.QDoubleSpinBox(self.parameter_page)
self.typical_voltage_spin.setMaximum(9999999.0)
self.typical_voltage_spin.setObjectName(_fromUtf8("typical_voltage_spin"))
self.gridLayout_3.addWidget(self.typical_voltage_spin, 3, 2, 1, 1)
self.family_combo = QtGui.QComboBox(self.parameter_page)
self.family_combo.setEditable(True)
self.family_combo.setObjectName(_fromUtf8("family_combo"))
self.family_combo.addItem(_fromUtf8(""))
self.gridLayout_3.addWidget(self.family_combo, 2, 2, 1, 1)
self.editor_stacked_widget.addWidget(self.parameter_page)
self.brightness_group_page = QtGui.QWidget()
self.brightness_group_page.setObjectName(_fromUtf8("brightness_group_page"))
self.verticalLayout = QtGui.QVBoxLayout(self.brightness_group_page)
self.verticalLayout.setMargin(0)
self.verticalLayout.setObjectName(_fromUtf8("verticalLayout"))
self.brightness_group_table_view = QtGui.QTableView(self.brightness_group_page)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.brightness_group_table_view.sizePolicy().hasHeightForWidth())
self.brightness_group_table_view.setSizePolicy(sizePolicy)
self.brightness_group_table_view.setObjectName(_fromUtf8("brightness_group_table_view"))
self.verticalLayout.addWidget(self.brightness_group_table_view)
self.editor_stacked_widget.addWidget(self.brightness_group_page)
self.voltage_group_page = QtGui.QWidget()
self.voltage_group_page.setObjectName(_fromUtf8("voltage_group_page"))
self.horizontalLayout_3 = QtGui.QHBoxLayout(self.voltage_group_page)
self.horizontalLayout_3.setMargin(0)
self.horizontalLayout_3.setObjectName(_fromUtf8("horizontalLayout_3"))
self.voltage_group_table_view = QtGui.QTableView(self.voltage_group_page)
self.voltage_group_table_view.setObjectName(_fromUtf8("voltage_group_table_view"))
self.voltage_group_table_view.verticalHeader().setCascadingSectionResizes(True)
self.voltage_group_table_view.verticalHeader().setDefaultSectionSize(0)
self.horizontalLayout_3.addWidget(self.voltage_group_table_view)
self.editor_stacked_widget.addWidget(self.voltage_group_page)
self.current_emission_page = QtGui.QWidget()
self.current_emission_page.setObjectName(_fromUtf8("current_emission_page"))
self.verticalLayout_5 = QtGui.QVBoxLayout(self.current_emission_page)
self.verticalLayout_5.setMargin(0)
self.verticalLayout_5.setObjectName(_fromUtf8("verticalLayout_5"))
self.current_emission_table_view = QtGui.QTableView(self.current_emission_page)
self.current_emission_table_view.setObjectName(_fromUtf8("current_emission_table_view"))
self.verticalLayout_5.addWidget(self.current_emission_table_view)
self.editor_stacked_widget.addWidget(self.current_emission_page)
self.current_voltage_page = QtGui.QWidget()
self.current_voltage_page.setObjectName(_fromUtf8("current_voltage_page"))
self.verticalLayout_6 = QtGui.QVBoxLayout(self.current_voltage_page)
self.verticalLayout_6.setMargin(0)
self.verticalLayout_6.setObjectName(_fromUtf8("verticalLayout_6"))
self.current_voltage_table_view = QtGui.QTableView(self.current_voltage_page)
self.current_voltage_table_view.setObjectName(_fromUtf8("current_voltage_table_view"))
self.verticalLayout_6.addWidget(self.current_voltage_table_view)
self.editor_stacked_widget.addWidget(self.current_voltage_page)
self.temperature_emission_page = QtGui.QWidget()
self.temperature_emission_page.setObjectName(_fromUtf8("temperature_emission_page"))
self.horizontalLayout_7 = QtGui.QHBoxLayout(self.temperature_emission_page)
self.horizontalLayout_7.setMargin(0)
self.horizontalLayout_7.setObjectName(_fromUtf8("horizontalLayout_7"))
self.temperature_emission_table_view = QtGui.QTableView(self.temperature_emission_page)
self.temperature_emission_table_view.setObjectName(_fromUtf8("temperature_emission_table_view"))
self.horizontalLayout_7.addWidget(self.temperature_emission_table_view)
self.editor_stacked_widget.addWidget(self.temperature_emission_page)
self.temperature_voltage_page = QtGui.QWidget()
self.temperature_voltage_page.setObjectName(_fromUtf8("temperature_voltage_page"))
self.horizontalLayout_8 = QtGui.QHBoxLayout(self.temperature_voltage_page)
self.horizontalLayout_8.setMargin(0)
self.horizontalLayout_8.setObjectName(_fromUtf8("horizontalLayout_8"))
self.temperature_voltage_table_view = QtGui.QTableView(self.temperature_voltage_page)
self.temperature_voltage_table_view.setObjectName(_fromUtf8("temperature_voltage_table_view"))
self.horizontalLayout_8.addWidget(self.temperature_voltage_table_view)
self.editor_stacked_widget.addWidget(self.temperature_voltage_page)
self.horizontalLayout_4.addWidget(self.editor_stacked_widget)
self.horizontalLayout_4.setStretch(1, 2)
self.verticalLayout_2.addLayout(self.horizontalLayout_4)
self.buttonBox = QtGui.QDialogButtonBox(self.editor_page)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.verticalLayout_2.addWidget(self.buttonBox)
self.main_stacked_widget.addWidget(self.editor_page)
self.horizontalLayout.addWidget(self.main_stacked_widget)
MainWindow.setCentralWidget(self.centralwidget)
self.voltage_label.setBuddy(self.voltage_label)
self.retranslateUi(MainWindow)
self.main_stacked_widget.setCurrentIndex(0)
self.editor_stacked_widget.setCurrentIndex(0)
QtCore.QObject.connect(self.led_editor_navigator_list, QtCore.SIGNAL(_fromUtf8("currentRowChanged(int)")), self.editor_stacked_widget.setCurrentIndex)
QtCore.QMetaObject.connectSlotsByName(MainWindow)
MainWindow.setTabOrder(self.name_edit, self.manufacturer_combo)
MainWindow.setTabOrder(self.manufacturer_combo, self.family_combo)
MainWindow.setTabOrder(self.family_combo, self.typical_voltage_spin)
MainWindow.setTabOrder(self.typical_voltage_spin, self.typical_current_spin)
MainWindow.setTabOrder(self.typical_current_spin, self.thermal_resistance_spin)
MainWindow.setTabOrder(self.thermal_resistance_spin, self.reference_temperature_spin)
MainWindow.setTabOrder(self.reference_temperature_spin, self.buttonBox)
MainWindow.setTabOrder(self.buttonBox, self.led_editor_navigator_list)
MainWindow.setTabOrder(self.led_editor_navigator_list, self.brightness_group_combobox)
MainWindow.setTabOrder(self.brightness_group_combobox, self.voltage_group_combobox)
MainWindow.setTabOrder(self.voltage_group_combobox, self.edit_led_button)
MainWindow.setTabOrder(self.edit_led_button, self.led_filter)
MainWindow.setTabOrder(self.led_filter, self.led_list_view)
MainWindow.setTabOrder(self.led_list_view, self.add_led_button)
MainWindow.setTabOrder(self.add_led_button, self.remove_led_button)
MainWindow.setTabOrder(self.remove_led_button, self.temperature_spinbox)
MainWindow.setTabOrder(self.temperature_spinbox, self.current_spinbox)
MainWindow.setTabOrder(self.current_spinbox, self.brightness_group_table_view)
MainWindow.setTabOrder(self.brightness_group_table_view, self.voltage_group_table_view)
def retranslateUi(self, MainWindow):
MainWindow.setWindowTitle(_translate("MainWindow", "LED Tool", None))
self.led_filter.setPlaceholderText(_translate("MainWindow", "Filter", None))
self.add_led_button.setText(_translate("MainWindow", "+", None))
self.remove_led_button.setText(_translate("MainWindow", "−", None))
self.voltage_label.setText(_translate("MainWindow", "--- V", None))
self.label.setText(_translate("MainWindow", "Current:", None))
self.current_spinbox.setSuffix(_translate("MainWindow", " mA", None))
self.label_8.setText(_translate("MainWindow", "Input Power:", None))
self.label_10.setText(_translate("MainWindow", "Radiant Efficiency:", None))
self.input_power_label.setText(_translate("MainWindow", "--- W", None))
self.label_9.setText(_translate("MainWindow", "Thermal Power:", None))
self.thermal_power_label.setText(_translate("MainWindow", "--- W", None))
self.label_5.setText(_translate("MainWindow", "Luminous Flux:", None))
self.label_4.setText(_translate("MainWindow", "Voltage Group:", None))
self.radiant_flux_label.setText(_translate("MainWindow", "--- W", None))
self.led_header_label.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:12pt; font-weight:600;\">LUMILEDS LUXEON F ES</span></p></body></html>", None))
self.edit_led_button.setText(_translate("MainWindow", "Edit", None))
self.label_2.setText(_translate("MainWindow", "Temperature:", None))
self.luminous_flux_label.setText(_translate("MainWindow", "--- lm", None))
self.label_7.setText(_translate("MainWindow", "Radiant Flux:", None))
self.label_3.setText(_translate("MainWindow", "Brightness Group:", None))
self.temperature_spinbox.setSuffix(_translate("MainWindow", " °C", None))
self.label_6.setText(_translate("MainWindow", "Voltage:", None))
self.radiant_efficiency_label.setText(_translate("MainWindow", "--- %", None))
__sortingEnabled = self.led_editor_navigator_list.isSortingEnabled()
self.led_editor_navigator_list.setSortingEnabled(False)
item = self.led_editor_navigator_list.item(0)
item.setText(_translate("MainWindow", "Parameter", None))
item = self.led_editor_navigator_list.item(1)
item.setText(_translate("MainWindow", "Brightness Groups", None))
item = self.led_editor_navigator_list.item(2)
item.setText(_translate("MainWindow", "Voltage Groups", None))
item = self.led_editor_navigator_list.item(3)
item.setText(_translate("MainWindow", "Current vs. Emission", None))
item = self.led_editor_navigator_list.item(4)
item.setText(_translate("MainWindow", "Current vs. Voltage", None))
item = self.led_editor_navigator_list.item(5)
item.setText(_translate("MainWindow", "Temperature vs. Emission", None))
item = self.led_editor_navigator_list.item(6)
item.setText(_translate("MainWindow", "Temperature vs. Voltage", None))
self.led_editor_navigator_list.setSortingEnabled(__sortingEnabled)
self.manufacturer_combo.setItemText(0, _translate("MainWindow", "OSRAM", None))
self.label_20.setText(_translate("MainWindow", "Typical Voltage:", None))
self.label_18.setText(_translate("MainWindow", "Manufacturer:", None))
self.label_19.setText(_translate("MainWindow", "Family:", None))
self.label_21.setText(_translate("MainWindow", "Typical Current:", None))
self.name_edit.setText(_translate("MainWindow", "LA T67F", None))
self.label_22.setText(_translate("MainWindow", "Thermal Resistance:", None))
self.label_23.setText(_translate("MainWindow", "Reference Temperature:", None))
self.thermal_resistance_spin.setSuffix(_translate("MainWindow", " K/W", None))
self.label_17.setText(_translate("MainWindow", "Name:", None))
self.reference_temperature_spin.setSuffix(_translate("MainWindow", " °C", None))
self.typical_current_spin.setSuffix(_translate("MainWindow", " mA", None))
self.typical_voltage_spin.setSuffix(_translate("MainWindow", " V", None))
self.family_combo.setItemText(0, _translate("MainWindow", "TOPLED", None))
| gpl-3.0 | 8,376,805,521,591,974,000 | 64.893365 | 190 | 0.706405 | false | 3.624951 | false | false | false |
ukch/refugeedata | refugeedata/distribution/decorators.py | 1 | 2228 | import datetime
import functools
from pyratemp import TemplateSyntaxError, TemplateRenderError
from django.core.exceptions import PermissionDenied
from django.shortcuts import get_object_or_404, redirect, render
from refugeedata.models import Distribution, Template
from .forms import DistributionHashForm
def standard_distribution_access(func):
@functools.wraps(func)
def wrapper(request, *args, **kwargs):
dist = get_object_or_404(
Distribution, id=kwargs.pop('distribution_id'))
if not request.user.is_superuser:
if dist.date != datetime.date.today():
raise PermissionDenied()
if not request.user.has_perm("distribution", obj=dist):
if request.method == "POST":
form = DistributionHashForm(dist, request.POST)
if form.is_valid():
request.session["distribution_hash"] = \
form.cleaned_data["password"]
return redirect(request.path)
else:
form = DistributionHashForm(dist)
return render(request, "distribution/login.html", {
"distribution": dist,
"form": form,
})
kwargs['distribution'] = dist
return func(request, *args, **kwargs)
return wrapper
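# Illustrative usage sketch: a view decorated with standard_distribution_access is
# called with the resolved Distribution instance as the ``distribution`` keyword
# argument instead of the raw ``distribution_id``.  The view name and template path
# below are hypothetical.
#
#   @standard_distribution_access
#   def distribution_detail(request, distribution):
#       return render(request, "distribution/detail.html",
#                     {"distribution": distribution})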
def handle_template_errors(func):
@functools.wraps(func)
def wrapper(request, distribution_id, *args, **kwargs):
distribution = get_object_or_404(Distribution, id=distribution_id)
try:
return func(request, distribution, *args, **kwargs)
except (TemplateSyntaxError, TemplateRenderError) as e:
if hasattr(e, "filename"):
template_id = e.filename
else:
template_id = kwargs.get("template_id")
            template = None
            if template_id:
                template = Template.objects.filter(id=template_id).first()
return render(request, "distribution/template_syntax_error.html", {
"distribution": distribution,
"template": template,
"exception": e,
}, status=400)
return wrapper
| mit | -3,910,878,151,373,112,300 | 35.52459 | 79 | 0.584829 | false | 4.740426 | false | false | false |
Distrotech/reportlab | src/reportlab/platypus/flowables.py | 1 | 72939 | #Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/platypus/flowables.py
__version__=''' $Id$ '''
__doc__="""
A flowable is a "floating element" in a document whose exact position is determined by the
other elements that precede it, such as a paragraph, a diagram interspersed between paragraphs,
a section header, etcetera. Examples of non-flowables include page numbering annotations,
headers, footers, fixed diagrams or logos, among others.
Flowables are defined here as objects which know how to determine their size and which
can draw themselves onto a page with respect to a relative "origin" position determined
at a higher level. The object's draw() method should assume that (0,0) corresponds to the
bottom left corner of the enclosing rectangle that will contain the object. The attributes
vAlign and hAlign may be used by 'packers' as hints as to how the object should be placed.
Some Flowables also know how to "split themselves". For example a
long paragraph might split itself between one page and the next.
Packers should set the canv attribute during wrap, split & draw operations to allow
the flowable to work out sizes etc in the proper context.
The "text" of a document usually consists mainly of a sequence of flowables which
flow into a document from top to bottom (with column and page breaks controlled by
higher level components).
"""
import os
from copy import deepcopy, copy
from reportlab.lib.colors import red, gray, lightgrey
from reportlab.lib.rl_accel import fp_str
from reportlab.lib.enums import TA_LEFT, TA_CENTER, TA_RIGHT, TA_JUSTIFY
from reportlab.lib.styles import _baseFontName
from reportlab.lib.utils import strTypes
from reportlab.pdfbase import pdfutils
from reportlab.pdfbase.pdfmetrics import stringWidth
from reportlab.rl_config import _FUZZ, overlapAttachedSpace, ignoreContainerActions, listWrapOnFakeWidth
import collections
__all__=('TraceInfo','Flowable','XBox','Preformatted','Image','Spacer','PageBreak','SlowPageBreak',
'CondPageBreak','KeepTogether','Macro','CallerMacro','ParagraphAndImage',
'FailOnWrap','HRFlowable','PTOContainer','KeepInFrame','UseUpSpace',
'ListFlowable','ListItem','DDIndenter','LIIndenter',
'DocAssign', 'DocExec', 'DocAssert', 'DocPara', 'DocIf', 'DocWhile',
'PageBreakIfNotEmpty',
)
class TraceInfo:
"Holder for info about where an object originated"
def __init__(self):
self.srcFile = '(unknown)'
self.startLineNo = -1
self.startLinePos = -1
self.endLineNo = -1
self.endLinePos = -1
#############################################################
# Flowable Objects - a base class and a few examples.
# One is just a box to get some metrics. We also have
# a paragraph, an image and a special 'page break'
# object which fills the space.
#############################################################
class Flowable:
"""Abstract base class for things to be drawn. Key concepts:
1. It knows its size
2. It draws in its own coordinate system (this requires the
base API to provide a translate() function.
"""
_fixedWidth = 0 #assume wrap results depend on arguments?
_fixedHeight = 0
def __init__(self):
self.width = 0
self.height = 0
self.wrapped = 0
        #these are hints to packers/frames as to how the flowable should be positioned
self.hAlign = 'LEFT' #CENTER/CENTRE or RIGHT
self.vAlign = 'BOTTOM' #MIDDLE or TOP
#optional holder for trace info
self._traceInfo = None
self._showBoundary = None
#many flowables handle text and must be processed in the
#absence of a canvas. tagging them with their encoding
#helps us to get conversions right. Use Python codec names.
self.encoding = None
def _drawOn(self,canv):
'''ensure canv is set on and then draw'''
self.canv = canv
self.draw()#this is the bit you overload
del self.canv
def _hAlignAdjust(self,x,sW=0):
if sW and hasattr(self,'hAlign'):
a = self.hAlign
if a in ('CENTER','CENTRE', TA_CENTER):
x += 0.5*sW
elif a in ('RIGHT',TA_RIGHT):
x += sW
elif a not in ('LEFT',TA_LEFT):
raise ValueError("Bad hAlign value "+str(a))
return x
def drawOn(self, canvas, x, y, _sW=0):
"Tell it to draw itself on the canvas. Do not override"
x = self._hAlignAdjust(x,_sW)
canvas.saveState()
canvas.translate(x, y)
self._drawOn(canvas)
if hasattr(self, '_showBoundary') and self._showBoundary:
#diagnostic tool support
canvas.setStrokeColor(gray)
canvas.rect(0,0,self.width, self.height)
canvas.restoreState()
def wrapOn(self, canv, aW, aH):
'''intended for use by packers allows setting the canvas on
during the actual wrap'''
self.canv = canv
w, h = self.wrap(aW,aH)
del self.canv
return w, h
def wrap(self, availWidth, availHeight):
"""This will be called by the enclosing frame before objects
are asked their size, drawn or whatever. It returns the
size actually used."""
return (self.width, self.height)
def minWidth(self):
"""This should return the minimum required width"""
return getattr(self,'_minWidth',self.width)
def splitOn(self, canv, aW, aH):
'''intended for use by packers allows setting the canvas on
during the actual split'''
self.canv = canv
S = self.split(aW,aH)
del self.canv
return S
def split(self, availWidth, availheight):
"""This will be called by more sophisticated frames when
wrap fails. Stupid flowables should return []. Clever flowables
should split themselves and return a list of flowables.
If they decide that nothing useful can be fitted in the
available space (e.g. if you have a table and not enough
space for the first row), also return []"""
return []
def getKeepWithNext(self):
"""returns boolean determining whether the next flowable should stay with this one"""
if hasattr(self,'keepWithNext'): return self.keepWithNext
elif hasattr(self,'style') and hasattr(self.style,'keepWithNext'): return self.style.keepWithNext
else: return 0
def getSpaceAfter(self):
"""returns how much space should follow this item if another item follows on the same page."""
if hasattr(self,'spaceAfter'): return self.spaceAfter
elif hasattr(self,'style') and hasattr(self.style,'spaceAfter'): return self.style.spaceAfter
else: return 0
def getSpaceBefore(self):
"""returns how much space should precede this item if another item precedess on the same page."""
if hasattr(self,'spaceBefore'): return self.spaceBefore
elif hasattr(self,'style') and hasattr(self.style,'spaceBefore'): return self.style.spaceBefore
else: return 0
def isIndexing(self):
"""Hook for IndexingFlowables - things which have cross references"""
return 0
def identity(self, maxLen=None):
'''
This method should attempt to return a string that can be used to identify
a particular flowable uniquely. The result can then be used for debugging
and or error printouts
'''
if hasattr(self, 'getPlainText'):
r = self.getPlainText(identify=1)
elif hasattr(self, 'text'):
r = str(self.text)
else:
r = '...'
if r and maxLen:
r = r[:maxLen]
return "<%s at %s%s>%s" % (self.__class__.__name__, hex(id(self)), self._frameName(), r)
def _doctemplateAttr(self,a):
return getattr(getattr(getattr(self,'canv',None),'_doctemplate',None),a,None)
def _frameName(self):
f = getattr(self,'_frame',None)
if not f: f = self._doctemplateAttr('frame')
if f and f.id: return ' frame=%s' % f.id
return ''
class XBox(Flowable):
"""Example flowable - a box with an x through it and a caption.
This has a known size, so does not need to respond to wrap()."""
def __init__(self, width, height, text = 'A Box'):
Flowable.__init__(self)
self.width = width
self.height = height
self.text = text
def __repr__(self):
return "XBox(w=%s, h=%s, t=%s)" % (self.width, self.height, self.text)
def draw(self):
self.canv.rect(0, 0, self.width, self.height)
self.canv.line(0, 0, self.width, self.height)
self.canv.line(0, self.height, self.width, 0)
#centre the text
self.canv.setFont(_baseFontName,12)
self.canv.drawCentredString(0.5*self.width, 0.5*self.height, self.text)
def _trimEmptyLines(lines):
#don't want the first or last to be empty
while len(lines) and lines[0].strip() == '':
lines = lines[1:]
while len(lines) and lines[-1].strip() == '':
lines = lines[:-1]
return lines
def _dedenter(text,dedent=0):
'''
tidy up text - carefully, it is probably code. If people want to
indent code within a source script, you can supply an arg to dedent
    and it will chop off that many characters, otherwise it leaves the
    left edge intact.
'''
lines = text.split('\n')
if dedent>0:
templines = _trimEmptyLines(lines)
lines = []
for line in templines:
line = line[dedent:].rstrip()
lines.append(line)
else:
lines = _trimEmptyLines(lines)
return lines
SPLIT_CHARS = "[{( ,.;:/\\-"
def splitLines(lines, maximum_length, split_characters, new_line_characters):
if split_characters is None:
split_characters = SPLIT_CHARS
if new_line_characters is None:
new_line_characters = ""
# Return a table of lines
lines_splitted = []
for line in lines:
if len(line) > maximum_length:
splitLine(line, lines_splitted, maximum_length, \
split_characters, new_line_characters)
else:
lines_splitted.append(line)
return lines_splitted
def splitLine(line_to_split, lines_splitted, maximum_length, \
split_characters, new_line_characters):
# Used to implement the characters added
#at the beginning of each new line created
first_line = True
# Check if the text can be splitted
while line_to_split and len(line_to_split)>0:
# Index of the character where we can split
split_index = 0
# Check if the line length still exceeds the maximum length
if len(line_to_split) <= maximum_length:
# Return the remaining of the line
split_index = len(line_to_split)
else:
# Iterate for each character of the line
for line_index in range(maximum_length):
# Check if the character is in the list
# of allowed characters to split on
if line_to_split[line_index] in split_characters:
split_index = line_index + 1
# If the end of the line was reached
# with no character to split on
if split_index==0:
split_index = line_index + 1
if first_line:
lines_splitted.append(line_to_split[0:split_index])
first_line = False
maximum_length -= len(new_line_characters)
else:
lines_splitted.append(new_line_characters + \
line_to_split[0:split_index])
# Remaining text to split
line_to_split = line_to_split[split_index:]
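# Worked example of the splitting above (illustrative only): with maximum_length=10,
# split_characters=SPLIT_CHARS and new_line_characters='> ', the single line
# 'alpha beta gamma' becomes
#   ['alpha ', '> beta ', '> gamma']
# i.e. the break happens just after a split character and every continuation line
# gets the new-line prefix (whose length is subtracted from the budget).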
class Preformatted(Flowable):
"""This is like the HTML <PRE> tag.
It attempts to display text exactly as you typed it in a fixed width "typewriter" font.
By default the line breaks are exactly where you put them, and it will not be wrapped.
You can optionally define a maximum line length and the code will be wrapped; and
extra characters to be inserted at the beginning of each wrapped line (e.g. '> ').
"""
def __init__(self, text, style, bulletText = None, dedent=0, maxLineLength=None, splitChars=None, newLineChars=""):
"""text is the text to display. If dedent is set then common leading space
will be chopped off the front (for example if the entire text is indented
6 spaces or more then each line will have 6 spaces removed from the front).
"""
self.style = style
self.bulletText = bulletText
self.lines = _dedenter(text,dedent)
if text and maxLineLength:
self.lines = splitLines(
self.lines,
maxLineLength,
splitChars,
newLineChars
)
def __repr__(self):
bT = self.bulletText
H = "Preformatted("
if bT is not None:
H = "Preformatted(bulletText=%s," % repr(bT)
return "%s'''\\ \n%s''')" % (H, '\n'.join(self.lines))
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = self.style.leading*len(self.lines)
return (self.width, self.height)
def minWidth(self):
style = self.style
fontSize = style.fontSize
fontName = style.fontName
return max([stringWidth(line,fontName,fontSize) for line in self.lines])
def split(self, availWidth, availHeight):
#returns two Preformatted objects
#not sure why they can be called with a negative height
if availHeight < self.style.leading:
return []
linesThatFit = int(availHeight * 1.0 / self.style.leading)
text1 = '\n'.join(self.lines[0:linesThatFit])
text2 = '\n'.join(self.lines[linesThatFit:])
style = self.style
if style.firstLineIndent != 0:
style = deepcopy(style)
style.firstLineIndent = 0
return [Preformatted(text1, self.style), Preformatted(text2, style)]
def draw(self):
#call another method for historical reasons. Besides, I
#suspect I will be playing with alternate drawing routines
#so not doing it here makes it easier to switch.
cur_x = self.style.leftIndent
cur_y = self.height - self.style.fontSize
self.canv.addLiteral('%PreformattedPara')
if self.style.textColor:
self.canv.setFillColor(self.style.textColor)
tx = self.canv.beginText(cur_x, cur_y)
#set up the font etc.
tx.setFont( self.style.fontName,
self.style.fontSize,
self.style.leading)
for text in self.lines:
tx.textLine(text)
self.canv.drawText(tx)
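# Illustrative sketch (hypothetical usage): wrapping long code lines with a prefix on
# each continuation line.  ``code_style`` is an assumed ParagraphStyle that uses a
# monospaced font; only constructor arguments documented above are used.
#
#   pre = Preformatted(source_text, code_style, dedent=4,
#                      maxLineLength=70, newLineChars='> ')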
class Image(Flowable):
"""an image (digital picture). Formats supported by PIL/Java 1.4 (the Python/Java Imaging Library
are supported. Images as flowables may be aligned horizontally in the
frame with the hAlign parameter - accepted values are 'CENTER',
'LEFT' or 'RIGHT' with 'CENTER' being the default.
    We allow for two kinds of laziness to cope with many images in a document,
    which could otherwise lead to file handle starvation.
lazy=1 don't open image until required.
lazy=2 open image when required then shut it.
"""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, filename, width=None, height=None, kind='direct',
mask="auto", lazy=1, hAlign='CENTER'):
"""If size to draw at not specified, get it from the image."""
self.hAlign = hAlign
self._mask = mask
fp = hasattr(filename,'read')
if fp:
self._file = filename
self.filename = repr(filename)
else:
self._file = self.filename = filename
if not fp and os.path.splitext(filename)[1] in ['.jpg', '.JPG', '.jpeg', '.JPEG']:
# if it is a JPEG, will be inlined within the file -
# but we still need to know its size now
from reportlab.lib.utils import open_for_read
f = open_for_read(filename, 'b')
try:
try:
info = pdfutils.readJPEGInfo(f)
except:
#couldn't read as a JPEG, try like normal
self._setup(width,height,kind,lazy)
return
finally:
f.close()
self.imageWidth = info[0]
self.imageHeight = info[1]
self._img = None
self._setup(width,height,kind,0)
elif fp:
self._setup(width,height,kind,0)
else:
self._setup(width,height,kind,lazy)
def _setup(self,width,height,kind,lazy):
self._lazy = lazy
self._width = width
self._height = height
self._kind = kind
if lazy<=0: self._setup_inner()
def _setup_inner(self):
width = self._width
height = self._height
kind = self._kind
img = self._img
if img: self.imageWidth, self.imageHeight = img.getSize()
if self._lazy>=2: del self._img
if kind in ['direct','absolute']:
self.drawWidth = width or self.imageWidth
self.drawHeight = height or self.imageHeight
elif kind in ['percentage','%']:
self.drawWidth = self.imageWidth*width*0.01
self.drawHeight = self.imageHeight*height*0.01
elif kind in ['bound','proportional']:
factor = min(float(width)/self.imageWidth,float(height)/self.imageHeight)
self.drawWidth = self.imageWidth*factor
self.drawHeight = self.imageHeight*factor
def _restrictSize(self,aW,aH):
if self.drawWidth>aW+_FUZZ or self.drawHeight>aH+_FUZZ:
self._oldDrawSize = self.drawWidth, self.drawHeight
factor = min(float(aW)/self.drawWidth,float(aH)/self.drawHeight)
self.drawWidth *= factor
self.drawHeight *= factor
return self.drawWidth, self.drawHeight
def _unRestrictSize(self):
dwh = getattr(self,'_oldDrawSize',None)
if dwh:
self.drawWidth, self.drawHeight = dwh
def __getattr__(self,a):
if a=='_img':
from reportlab.lib.utils import ImageReader #this may raise an error
self._img = ImageReader(self._file)
if not isinstance(self._file,strTypes):
self._file = None
if self._lazy>=2: self._lazy = 1 #here we're assuming we cannot read again
return self._img
elif a in ('drawWidth','drawHeight','imageWidth','imageHeight'):
self._setup_inner()
return self.__dict__[a]
raise AttributeError("<Image @ 0x%x>.%s" % (id(self),a))
def wrap(self, availWidth, availHeight):
#the caller may decide it does not fit.
return self.drawWidth, self.drawHeight
def draw(self):
lazy = self._lazy
if lazy>=2: self._lazy = 1
self.canv.drawImage( self._img or self.filename,
getattr(self,'_offs_x',0),
getattr(self,'_offs_y',0),
self.drawWidth,
self.drawHeight,
mask=self._mask,
)
if lazy>=2:
self._img = self._file = None
self._lazy = lazy
def identity(self,maxLen=None):
r = Flowable.identity(self,maxLen)
if r[-4:]=='>...' and isinstance(self.filename,str):
r = "%s filename=%s>" % (r[:-4],self.filename)
return r
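# Illustrative sketch (hypothetical usage) of the sizing kinds accepted above;
# 'logo.jpg' is a placeholder filename.
#
#   Image('logo.jpg')                                  # natural size (kind='direct')
#   Image('logo.jpg', width=50, height=50, kind='%')   # 50% of natural size
#   Image('logo.jpg', width=200, height=100,
#         kind='bound', lazy=2)                        # scale to fit 200x100, open and
#                                                      # close the file on demand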
class NullDraw(Flowable):
def draw(self):
pass
class Spacer(NullDraw):
"""A spacer just takes up space and doesn't draw anything - it guarantees
a gap between objects."""
_fixedWidth = 1
_fixedHeight = 1
def __init__(self, width, height, isGlue=False):
self.width = width
if isGlue:
self.height = 1e-4
self.spacebefore = height
self.height = height
def __repr__(self):
return "%s(%s, %s)" % (self.__class__.__name__,self.width, self.height)
class UseUpSpace(NullDraw):
def __init__(self):
pass
def __repr__(self):
return "%s()" % self.__class__.__name__
def wrap(self, availWidth, availHeight):
self.width = availWidth
self.height = availHeight
return (availWidth,availHeight-1e-8) #step back a point
class PageBreak(UseUpSpace):
"""Move on to the next page in the document.
This works by consuming all remaining space in the frame!"""
def __init__(self,nextTemplate=None):
self.nextTemplate = nextTemplate
class SlowPageBreak(PageBreak):
pass
class PageBreakIfNotEmpty(PageBreak):
pass
class CondPageBreak(Spacer):
"""use up a frame if not enough vertical space effectively CondFrameBreak"""
def __init__(self, height):
self.height = height
def __repr__(self):
return "CondPageBreak(%s)" %(self.height,)
def wrap(self, availWidth, availHeight):
if availHeight<self.height:
f = self._doctemplateAttr('frame')
if not f: return availWidth, availHeight
from reportlab.platypus.doctemplate import FrameBreak
f.add_generated_content(FrameBreak)
return 0, 0
def identity(self,maxLen=None):
return repr(self).replace(')',',frame=%s)'%self._frameName())
def _listWrapOn(F,availWidth,canv,mergeSpace=1,obj=None,dims=None,fakeWidth=None):
'''return max width, required height for a list of flowables F'''
doct = getattr(canv,'_doctemplate',None)
cframe = getattr(doct,'frame',None)
if fakeWidth is None:
fakeWidth = listWrapOnFakeWidth
if cframe:
from reportlab.platypus.doctemplate import _addGeneratedContent, Indenter
doct_frame = cframe
cframe = doct.frame = deepcopy(doct_frame)
cframe._generated_content = None
del cframe._generated_content
try:
W = 0
H = 0
pS = 0
atTop = 1
F = F[:]
while F:
f = F.pop(0)
if hasattr(f,'frameAction'):
from reportlab.platypus.doctemplate import Indenter
if isinstance(f,Indenter):
availWidth -= f.left+f.right
continue
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if dims is not None: dims.append((w,h))
if cframe:
_addGeneratedContent(F,cframe)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,min(w,availWidth) if fakeWidth else w)
H += h
if not atTop:
h = f.getSpaceBefore()
if mergeSpace:
if getattr(f,'_SPACETRANSFER',False):
h = pS
h = max(h-pS,0)
H += h
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
s = f.getSpaceAfter()
if getattr(f,'_SPACETRANSFER',False):
s = pS
pS = s
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS
finally:
if cframe:
doct.frame = doct_frame
def _flowableSublist(V):
"if it isn't a list or tuple, wrap it in a list"
if not isinstance(V,(list,tuple)): V = V is not None and [V] or []
from reportlab.platypus.doctemplate import LCActionFlowable
assert not [x for x in V if isinstance(x,LCActionFlowable)],'LCActionFlowables not allowed in sublists'
return V
class _ContainerSpace: #Abstract some common container like behaviour
def getSpaceBefore(self):
for c in self._content:
if not hasattr(c,'frameAction'):
return c.getSpaceBefore()
return 0
def getSpaceAfter(self,content=None):
#this needs 2.4
#for c in reversed(content or self._content):
reverseContent = (content or self._content)[:]
reverseContent.reverse()
for c in reverseContent:
if not hasattr(c,'frameAction'):
return c.getSpaceAfter()
return 0
class KeepTogether(_ContainerSpace,Flowable):
def __init__(self,flowables,maxHeight=None):
self._content = _flowableSublist(flowables)
self._maxHeight = maxHeight
def __repr__(self):
f = self._content
L = list(map(repr,f))
L = "\n"+"\n".join(L)
L = L.replace("\n", "\n ")
return "%s(%s,maxHeight=%s)" % (self.__class__.__name__,L,self._maxHeight)
def wrap(self, aW, aH):
dims = []
W,H = _listWrapOn(self._content,aW,self.canv,dims=dims)
self._H = H
self._H0 = dims and dims[0][1] or 0
self._wrapInfo = aW,aH
return W, 0xffffff # force a split
def split(self, aW, aH):
if getattr(self,'_wrapInfo',None)!=(aW,aH): self.wrap(aW,aH)
S = self._content[:]
atTop = getattr(self,'_frame',None)
if atTop: atTop = getattr(atTop,'_atTop',None)
C0 = self._H>aH and (not self._maxHeight or aH>self._maxHeight)
C1 = (self._H0>aH) or C0 and atTop
if C0 or C1:
if C0:
from reportlab.platypus.doctemplate import FrameBreak
A = FrameBreak
else:
from reportlab.platypus.doctemplate import NullActionFlowable
A = NullActionFlowable
S.insert(0,A())
return S
def identity(self, maxLen=None):
msg = "<%s at %s%s> containing :%s" % (self.__class__.__name__,hex(id(self)),self._frameName(),"\n".join([f.identity() for f in self._content]))
if maxLen:
return msg[0:maxLen]
else:
return msg
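# Illustrative sketch (hypothetical usage): keep a heading on the same page as the
# paragraph that follows it; ``heading`` and ``body`` are assumed Paragraph flowables
# and ``story`` an assumed list of flowables.
#
#   story.append(KeepTogether([heading, body], maxHeight=200))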
class Macro(Flowable):
"""This is not actually drawn (i.e. it has zero height)
but is executed when it would fit in the frame. Allows direct
access to the canvas through the object 'canvas'"""
def __init__(self, command):
self.command = command
def __repr__(self):
return "Macro(%s)" % repr(self.command)
def wrap(self, availWidth, availHeight):
return (0,0)
def draw(self):
exec(self.command, globals(), {'canvas':self.canv})
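# Illustrative sketch (hypothetical usage): the command string is exec'd with the
# canvas available under the name 'canvas', e.g.
#
#   story.append(Macro('canvas.setFont("Helvetica", 8)'))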
def _nullCallable(*args,**kwds):
pass
class CallerMacro(Flowable):
'''
like Macro, but with callable command(s)
drawCallable(self)
wrapCallable(self,aW,aH)
'''
def __init__(self, drawCallable=None, wrapCallable=None):
self._drawCallable = drawCallable or _nullCallable
self._wrapCallable = wrapCallable or _nullCallable
def __repr__(self):
return "CallerMacro(%r,%r)" % (self._drawCallable,self._wrapCallable)
def wrap(self, aW, aH):
self._wrapCallable(self,aW,aH)
return (0,0)
def draw(self):
self._drawCallable(self)
class ParagraphAndImage(Flowable):
'''combine a Paragraph and an Image'''
def __init__(self,P,I,xpad=3,ypad=3,side='right'):
self.P = P
self.I = I
self.xpad = xpad
self.ypad = ypad
self._side = side
def getSpaceBefore(self):
return max(self.P.getSpaceBefore(),self.I.getSpaceBefore())
def getSpaceAfter(self):
return max(self.P.getSpaceAfter(),self.I.getSpaceAfter())
def wrap(self,availWidth,availHeight):
wI, hI = self.I.wrap(availWidth,availHeight)
self.wI = wI
self.hI = hI
# work out widths array for breaking
self.width = availWidth
P = self.P
style = P.style
xpad = self.xpad
ypad = self.ypad
leading = style.leading
leftIndent = style.leftIndent
later_widths = availWidth - leftIndent - style.rightIndent
intermediate_widths = later_widths - xpad - wI
first_line_width = intermediate_widths - style.firstLineIndent
P.width = 0
nIW = int((hI+ypad)/(leading*1.0))
P.blPara = P.breakLines([first_line_width] + nIW*[intermediate_widths]+[later_widths])
if self._side=='left':
self._offsets = [wI+xpad]*(1+nIW)+[0]
P.height = len(P.blPara.lines)*leading
self.height = max(hI,P.height)
return (self.width, self.height)
def split(self,availWidth, availHeight):
P, wI, hI, ypad = self.P, self.wI, self.hI, self.ypad
if hI+ypad>availHeight or len(P.frags)<=0: return []
S = P.split(availWidth,availHeight)
if not S: return S
P = self.P = S[0]
del S[0]
style = P.style
P.height = len(self.P.blPara.lines)*style.leading
self.height = max(hI,P.height)
return [self]+S
def draw(self):
canv = self.canv
if self._side=='left':
self.I.drawOn(canv,0,self.height-self.hI)
self.P._offsets = self._offsets
try:
self.P.drawOn(canv,0,0)
finally:
del self.P._offsets
else:
self.I.drawOn(canv,self.width-self.wI-self.xpad,self.height-self.hI)
self.P.drawOn(canv,0,0)
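# Illustrative sketch (hypothetical usage): flow a paragraph beside an image placed on
# the left; ``para`` and ``img`` are assumed Paragraph and Image instances.
#
#   story.append(ParagraphAndImage(para, img, xpad=6, ypad=6, side='left'))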
class FailOnWrap(NullDraw):
def wrap(self, availWidth, availHeight):
raise ValueError("FailOnWrap flowable wrapped and failing as ordered!")
class FailOnDraw(Flowable):
def wrap(self, availWidth, availHeight):
return 0,0
def draw(self):
raise ValueError("FailOnDraw flowable drawn, and failing as ordered!")
class HRFlowable(Flowable):
'''Like the hr tag'''
def __init__(self,
width="80%",
thickness=1,
lineCap='round',
color=lightgrey,
spaceBefore=1, spaceAfter=1,
hAlign='CENTER', vAlign='BOTTOM',
dash=None):
Flowable.__init__(self)
self.width = width
self.lineWidth = thickness
self.lineCap=lineCap
self.spaceBefore = spaceBefore
self.spaceAfter = spaceAfter
self.color = color
self.hAlign = hAlign
self.vAlign = vAlign
self.dash = dash
def __repr__(self):
return "HRFlowable(width=%s, height=%s)" % (self.width, self.height)
def wrap(self, availWidth, availHeight):
w = self.width
if type(w) is type(''):
w = w.strip()
if w.endswith('%'): w = availWidth*float(w[:-1])*0.01
else: w = float(w)
w = min(w,availWidth)
self._width = w
return w, self.lineWidth
def draw(self):
canv = self.canv
canv.saveState()
canv.setLineWidth(self.lineWidth)
canv.setLineCap({'butt':0,'round':1, 'square': 2}[self.lineCap.lower()])
canv.setStrokeColor(self.color)
if self.dash: canv.setDash(self.dash)
canv.line(0, 0, self._width, self.height)
canv.restoreState()
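# Illustrative sketch (hypothetical usage): a thin dashed rule spanning half the
# available width.
#
#   story.append(HRFlowable(width='50%', thickness=0.5, color=gray,
#                           spaceBefore=6, spaceAfter=6, dash=(2, 2)))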
class _PTOInfo:
def __init__(self,trailer,header):
self.trailer = _flowableSublist(trailer)
self.header = _flowableSublist(header)
def cdeepcopy(obj):
if hasattr(obj,'deepcopy'):
return obj.deepcopy()
else:
return deepcopy(obj)
class _Container(_ContainerSpace): #Abstract some common container like behaviour
def drawOn(self, canv, x, y, _sW=0, scale=1.0, content=None, aW=None):
'''we simulate being added to a frame'''
from reportlab.platypus.doctemplate import ActionFlowable, Indenter
x0 = x
y0 = y
pS = 0
if aW is None: aW = self.width
aW *= scale
if content is None:
content = self._content
x = self._hAlignAdjust(x,_sW*scale)
y += self.height*scale
yt = y
frame = getattr(self,'_frame',None)
for c in content:
if not ignoreContainerActions and isinstance(c,ActionFlowable):
c.apply(self.canv._doctemplate)
continue
if isinstance(c,Indenter):
x += c.left*scale
aW -= (c.left+c.right)*scale
continue
w, h = c.wrapOn(canv,aW,0xfffffff)
if (w<_FUZZ or h<_FUZZ) and not getattr(c,'_ZEROSIZE',None): continue
if yt!=y:
s = c.getSpaceBefore()
if not getattr(c,'_SPACETRANSFER',False):
h += max(s-pS,0)
y -= h
fbg = getattr(frame,'_frameBGs',None)
s = c.getSpaceAfter()
if getattr(c,'_SPACETRANSFER',False):
s = pS
pS = s
if fbg:
fbgl, fbgr, fbgc = fbg[-1]
fbw = scale*(frame._width-fbgl-fbgr)
fbh = y + h + pS
fby = max(y0,y-pS)
fbh = max(0,fbh-fby)
if abs(fbw)>_FUZZ and abs(fbh)>_FUZZ:
canv.saveState()
canv.setFillColor(fbgc)
canv.rect(x0+scale*(fbgl-frame._leftPadding)-0.1,fby-0.1,fbw+0.2,fbh+0.2,stroke=0,fill=1)
canv.restoreState()
c._frame = frame
c.drawOn(canv,x,y,_sW=aW-w)
if c is not content[-1] and not getattr(c,'_SPACETRANSFER',None):
y -= pS
del c._frame
def copyContent(self,content=None):
C = [].append
for c in (content or self._content):
C(cdeepcopy(c))
self._content = C.__self__
class PTOContainer(_Container,Flowable):
'''PTOContainer(contentList,trailerList,headerList)
A container for flowables decorated with trailer & header lists.
If the split operation would be called then the trailer and header
lists are injected before and after the split. This allows specialist
"please turn over" and "continued from previous" like behaviours.'''
def __init__(self,content,trailer=None,header=None):
I = _PTOInfo(trailer,header)
self._content = C = []
for _ in _flowableSublist(content):
if isinstance(_,PTOContainer):
C.extend(_._content)
else:
C.append(_)
if not hasattr(_,'_ptoinfo'): _._ptoinfo = I
def wrap(self,availWidth,availHeight):
self.width, self.height = _listWrapOn(self._content,availWidth,self.canv)
return self.width,self.height
def split(self, availWidth, availHeight):
from reportlab.platypus.doctemplate import Indenter
if availHeight<0: return []
canv = self.canv
C = self._content
x = i = H = pS = hx = 0
n = len(C)
I2W = {}
dLeft = dRight = 0
for x in xrange(n):
c = C[x]
I = c._ptoinfo
if I not in I2W.keys():
T = I.trailer
Hdr = I.header
tW, tH = _listWrapOn(T, availWidth, self.canv)
if len(T): #trailer may have no content
tSB = T[0].getSpaceBefore()
else:
tSB = 0
I2W[I] = T,tW,tH,tSB
else:
T,tW,tH,tSB = I2W[I]
_, h = c.wrapOn(canv,availWidth,0xfffffff)
if isinstance(c,Indenter):
dw = c.left+c.right
dLeft += c.left
dRight += c.right
availWidth -= dw
pS = 0
hx = 0
else:
if x:
hx = max(c.getSpaceBefore()-pS,0)
h += hx
pS = c.getSpaceAfter()
H += h+pS
tHS = tH+max(tSB,pS)
if H+tHS>=availHeight-_FUZZ: break
i += 1
#first retract last thing we tried
H -= (h+pS)
#attempt a sub split on the last one we have
aH = (availHeight-H-tHS-hx)*0.99999
if aH>=0.05*availHeight:
SS = c.splitOn(canv,availWidth,aH)
else:
SS = []
if abs(dLeft)+abs(dRight)>1e-8:
R1I = [Indenter(-dLeft,-dRight)]
R2I = [Indenter(dLeft,dRight)]
else:
R1I = R2I = []
if not SS:
j = i
while i>1 and C[i-1].getKeepWithNext():
i -= 1
C[i].keepWithNext = 0
if i==1 and C[0].getKeepWithNext():
#robin's black sheep
i = j
C[0].keepWithNext = 0
F = [UseUpSpace()]
if len(SS)>1:
R1 = C[:i]+SS[:1]+R1I+T+F
R2 = Hdr+R2I+SS[1:]+C[i+1:]
elif not i:
return []
else:
R1 = C[:i]+R1I+T+F
R2 = Hdr+R2I+C[i:]
T = R1 + [PTOContainer(R2,[copy(x) for x in I.trailer],[copy(x) for x in I.header])]
return T
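# Illustrative sketch (hypothetical usage): decorate a block of flowables with
# "please turn over" / "continued from previous" content; ``body``, ``pto_note`` and
# ``cont_note`` are assumed lists of flowables.
#
#   story.append(PTOContainer(body, trailer=pto_note, header=cont_note))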
#utility functions used by KeepInFrame
def _hmodel(s0,s1,h0,h1):
# calculate the parameters in the model
# h = a/s**2 + b/s
a11 = 1./s0**2
a12 = 1./s0
a21 = 1./s1**2
a22 = 1./s1
det = a11*a22-a12*a21
b11 = a22/det
b12 = -a12/det
b21 = -a21/det
b22 = a11/det
a = b11*h0+b12*h1
b = b21*h0+b22*h1
return a,b
def _qsolve(h,ab):
'''solve the model v = a/s**2 + b/s for an s which gives us v==h'''
a,b = ab
if abs(a)<=_FUZZ:
return b/h
t = 0.5*b/a
from math import sqrt
f = -h/a
r = t*t-f
if r<0: return None
r = sqrt(r)
if t>=0:
s1 = -t - r
else:
s1 = -t + r
s2 = f/s1
return max(1./s1, 1./s2)
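# Explanatory note: _hmodel fits the two observations (s0, h0) and (s1, h1) to the
# model h(s) = a/s**2 + b/s by solving the 2x2 linear system
#     a/s0**2 + b/s0 = h0
#     a/s1**2 + b/s1 = h1
# for (a, b); _qsolve then inverts the model, solving a/s**2 + b/s = h
# (equivalently h*s**2 - b*s - a = 0) for the usable scale factor s.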
class KeepInFrame(_Container,Flowable):
def __init__(self, maxWidth, maxHeight, content=[], mergeSpace=1, mode='shrink', name='',hAlign='LEFT',vAlign='BOTTOM', fakeWidth=None):
'''mode describes the action to take when overflowing
error raise an error in the normal way
            overflow    ignore, i.e. just draw it and report maxWidth, maxHeight
shrink shrinkToFit
truncate fit as much as possible
set fakeWidth to False to make _listWrapOn do the 'right' thing
'''
self.name = name
self.maxWidth = maxWidth
self.maxHeight = maxHeight
self.mode = mode
assert mode in ('error','overflow','shrink','truncate'), '%s invalid mode value %s' % (self.identity(),mode)
assert maxHeight>=0, '%s invalid maxHeight value %s' % (self.identity(),maxHeight)
if mergeSpace is None: mergeSpace = overlapAttachedSpace
self.mergespace = mergeSpace
self._content = content or []
self.vAlign = vAlign
self.hAlign = hAlign
self.fakeWidth = fakeWidth
def _getAvailableWidth(self):
return self.maxWidth - self._leftExtraIndent - self._rightExtraIndent
def identity(self, maxLen=None):
return "<%s at %s%s%s> size=%sx%s" % (self.__class__.__name__, hex(id(self)), self._frameName(),
getattr(self,'name','') and (' name="%s"'% getattr(self,'name','')) or '',
getattr(self,'maxWidth','') and (' maxWidth=%s'%fp_str(getattr(self,'maxWidth',0))) or '',
getattr(self,'maxHeight','')and (' maxHeight=%s' % fp_str(getattr(self,'maxHeight')))or '')
def wrap(self,availWidth,availHeight):
from reportlab.platypus.doctemplate import LayoutError
mode = self.mode
maxWidth = float(min(self.maxWidth or availWidth,availWidth))
maxHeight = float(min(self.maxHeight or availHeight,availHeight))
fakeWidth = self.fakeWidth
W, H = _listWrapOn(self._content,maxWidth,self.canv, fakeWidth=fakeWidth)
if (mode=='error' and (W>maxWidth+_FUZZ or H>maxHeight+_FUZZ)):
ident = 'content %sx%s too large for %s' % (W,H,self.identity(30))
#leave to keep apart from the raise
raise LayoutError(ident)
elif W<=maxWidth+_FUZZ and H<=maxHeight+_FUZZ:
self.width = W-_FUZZ #we take what we get
self.height = H-_FUZZ
elif mode in ('overflow','truncate'): #we lie
self.width = min(maxWidth,W)-_FUZZ
self.height = min(maxHeight,H)-_FUZZ
else:
def func(x):
x = float(x)
W, H = _listWrapOn(self._content,x*maxWidth,self.canv, fakeWidth=fakeWidth)
W /= x
H /= x
return W, H
W0 = W
H0 = H
s0 = 1
if W>maxWidth+_FUZZ:
#squeeze out the excess width and or Height
s1 = W/maxWidth #linear model
W, H = func(s1)
if H<=maxHeight+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
return W,H
s0 = s1
H0 = H
W0 = W
s1 = H/maxHeight
W, H = func(s1)
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s1
if H<min(0.95*maxHeight,maxHeight-10) or H>=maxHeight+_FUZZ:
#the standard case W should be OK, H is short we want
#to find the smallest s with H<=maxHeight
H1 = H
for f in 0, 0.01, 0.05, 0.10, 0.15:
#apply the quadratic model
s = _qsolve(maxHeight*(1-f),_hmodel(s0,s1,H0,H1))
W, H = func(s)
if H<=maxHeight+_FUZZ and W<=maxWidth+_FUZZ:
self.width = W-_FUZZ
self.height = H-_FUZZ
self._scale = s
break
return self.width, self.height
def drawOn(self, canv, x, y, _sW=0):
scale = getattr(self,'_scale',1.0)
truncate = self.mode=='truncate'
ss = scale!=1.0 or truncate
if ss:
canv.saveState()
if truncate:
p = canv.beginPath()
p.rect(x, y, self.width,self.height)
canv.clipPath(p,stroke=0)
else:
canv.translate(x,y)
x=y=0
canv.scale(1.0/scale, 1.0/scale)
_Container.drawOn(self, canv, x, y, _sW=_sW, scale=scale)
if ss: canv.restoreState()
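# Illustrative sketch (hypothetical usage): shrink arbitrary content to fit a fixed
# 4 x 2 inch box; ``flowables`` is an assumed list and the sizes are in points.
#
#   box = KeepInFrame(4*72, 2*72, flowables, mode='shrink', hAlign='CENTER')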
class ImageAndFlowables(_Container,Flowable):
'''combine a list of flowables and an Image'''
def __init__(self,I,F,imageLeftPadding=0,imageRightPadding=3,imageTopPadding=0,imageBottomPadding=3,
imageSide='right', imageHref=None):
self._content = _flowableSublist(F)
self._I = I
self._irpad = imageRightPadding
self._ilpad = imageLeftPadding
self._ibpad = imageBottomPadding
self._itpad = imageTopPadding
self._side = imageSide
self.imageHref = imageHref
def deepcopy(self):
c = copy(self) #shallow
self._reset()
c.copyContent() #partially deep?
return c
def getSpaceAfter(self):
if hasattr(self,'_C1'):
C = self._C1
elif hasattr(self,'_C0'):
C = self._C0
else:
C = self._content
return _Container.getSpaceAfter(self,C)
def getSpaceBefore(self):
return max(self._I.getSpaceBefore(),_Container.getSpaceBefore(self))
def _reset(self):
for a in ('_wrapArgs','_C0','_C1'):
try:
delattr(self,a)
except:
pass
def wrap(self,availWidth,availHeight):
canv = self.canv
I = self._I
if hasattr(self,'_wrapArgs'):
if self._wrapArgs==(availWidth,availHeight) and getattr(I,'_oldDrawSize',None) is None:
return self.width,self.height
self._reset()
I._unRestrictSize()
self._wrapArgs = availWidth, availHeight
I.wrap(availWidth,availHeight)
wI, hI = I._restrictSize(availWidth,availHeight)
self._wI = wI
self._hI = hI
ilpad = self._ilpad
irpad = self._irpad
ibpad = self._ibpad
itpad = self._itpad
self._iW = iW = availWidth - irpad - wI - ilpad
aH = itpad + hI + ibpad
if iW>_FUZZ:
W,H0,self._C0,self._C1 = self._findSplit(canv,iW,aH)
else:
W = availWidth
H0 = 0
if W>iW+_FUZZ:
self._C0 = []
self._C1 = self._content
aH = self._aH = max(aH,H0)
self.width = availWidth
if not self._C1:
self.height = aH
else:
W1,H1 = _listWrapOn(self._C1,availWidth,canv)
self.height = aH+H1
return self.width, self.height
def split(self,availWidth, availHeight):
if hasattr(self,'_wrapArgs'):
I = self._I
if self._wrapArgs!=(availWidth,availHeight) or getattr(I,'_oldDrawSize',None) is not None:
self._reset()
I._unRestrictSize()
W,H=self.wrap(availWidth,availHeight)
if self._aH>availHeight: return []
C1 = self._C1
if C1:
S = C1[0].split(availWidth,availHeight-self._aH)
if not S:
_C1 = []
else:
_C1 = [S[0]]
C1 = S[1:]+C1[1:]
else:
_C1 = []
return [ImageAndFlowables(
self._I,
self._C0+_C1,
imageLeftPadding=self._ilpad,
imageRightPadding=self._irpad,
imageTopPadding=self._itpad,
imageBottomPadding=self._ibpad,
imageSide=self._side, imageHref=self.imageHref)
]+C1
def drawOn(self, canv, x, y, _sW=0):
if self._side=='left':
Ix = x + self._ilpad
Fx = Ix+ self._irpad + self._wI
else:
Ix = x + self.width-self._wI-self._irpad
Fx = x
self._I.drawOn(canv,Ix,y+self.height-self._itpad-self._hI)
if self.imageHref:
canv.linkURL(self.imageHref, (Ix, y+self.height-self._itpad-self._hI, Ix + self._wI, y+self.height), relative=1)
if self._C0:
_Container.drawOn(self, canv, Fx, y, content=self._C0, aW=self._iW)
if self._C1:
aW, aH = self._wrapArgs
_Container.drawOn(self, canv, x, y-self._aH,content=self._C1, aW=aW)
def _findSplit(self,canv,availWidth,availHeight,mergeSpace=1,obj=None):
'''return max width, required height for a list of flowables F'''
W = 0
H = 0
pS = sB = 0
atTop = 1
F = self._content
for i,f in enumerate(F):
w,h = f.wrapOn(canv,availWidth,0xfffffff)
if w<=_FUZZ or h<=_FUZZ: continue
W = max(W,w)
if not atTop:
s = f.getSpaceBefore()
if mergeSpace: s = max(s-pS,0)
H += s
else:
if obj is not None: obj._spaceBefore = f.getSpaceBefore()
atTop = 0
if H>=availHeight or w>availWidth:
return W, availHeight, F[:i],F[i:]
H += h
if H>availHeight:
from reportlab.platypus.paragraph import Paragraph
aH = availHeight-(H-h)
if isinstance(f,(Paragraph,Preformatted)):
leading = f.style.leading
nH = leading*int(aH/float(leading))+_FUZZ
if nH<aH: nH += leading
availHeight += nH-aH
aH = nH
S = cdeepcopy(f).splitOn(canv,availWidth,aH)
if not S:
return W, availHeight, F[:i],F[i:]
else:
return W,availHeight,F[:i]+S[:1],S[1:]+F[i+1:]
pS = f.getSpaceAfter()
H += pS
if obj is not None: obj._spaceAfter = pS
return W, H-pS, F, []
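# Illustrative sketch (hypothetical usage): wrap a list of paragraphs around an image
# on the right-hand side; ``img`` and ``paras`` are assumed objects.
#
#   story.append(ImageAndFlowables(img, paras, imageSide='right',
#                                  imageLeftPadding=6, imageRightPadding=0))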
class AnchorFlowable(Spacer):
'''create a bookmark in the pdf'''
_ZEROSIZE=1
_SPACETRANSFER = True
def __init__(self,name):
Spacer.__init__(self,0,0)
self._name = name
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,self._name)
def wrap(self,aW,aH):
return 0,0
def draw(self):
self.canv.bookmarkHorizontal(self._name,0,0)
class FrameBG(AnchorFlowable):
"""Start or stop coloring the frame background
left & right are distances from the edge of the frame to start stop colouring.
"""
_ZEROSIZE=1
def __init__(self, color=None, left=0, right=0, start=True):
Spacer.__init__(self,0,0)
self.start = start
if start:
from reportlab.platypus.doctemplate import _evalMeasurement
self.left = _evalMeasurement(left)
self.right = _evalMeasurement(right)
self.color = color
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__,', '.join(['%s=%r' % (i,getattr(self,i,None)) for i in 'start color left right'.split()]))
def draw(self):
frame = getattr(self,'_frame',None)
if frame is None: return
if self.start:
w = getattr(frame,'_lineWidth',0)
frame._frameBGs.append((self.left,self.right,self.color))
elif frame._frameBGs:
frame._frameBGs.pop()
class FrameSplitter(NullDraw):
    '''When encountered, this flowable either switches directly to nextTemplate
    (if the remaining space in the current frame is less than gap+required), or it
    temporarily modifies the current template to use the frames from nextTemplate
    that are listed in nextFrames and switches to the first of those frames.
'''
_ZEROSIZE=1
def __init__(self,nextTemplate,nextFrames=[],gap=10,required=72):
self.nextTemplate=nextTemplate
self.nextFrames=nextFrames or []
self.gap=gap
self.required=required
def wrap(self,aW,aH):
frame = self._frame
from reportlab.platypus.doctemplate import NextPageTemplate,CurrentFrameFlowable,LayoutError
G=[NextPageTemplate(self.nextTemplate)]
if aH<self.gap+self.required-_FUZZ:
#we are going straight to the nextTemplate with no attempt to modify the frames
G.append(PageBreak())
else:
#we are going to modify the incoming templates
templates = self._doctemplateAttr('pageTemplates')
if templates is None:
raise LayoutError('%s called in non-doctemplate environment'%self.identity())
T=[t for t in templates if t.id==self.nextTemplate]
if not T:
raise LayoutError('%s.nextTemplate=%s not found' % (self.identity(),self.nextTemplate))
T=T[0]
F=[f for f in T.frames if f.id in self.nextFrames]
N=[f.id for f in F]
N=[f for f in self.nextFrames if f not in N]
if N:
raise LayoutError('%s frames=%r not found in pageTemplate(%s)\n%r has frames %r' % (self.identity(),N,T.id,T,[f.id for f in T.frames]))
T=self._doctemplateAttr('pageTemplate')
def unwrap(canv,doc,T=T,onPage=T.onPage,oldFrames=T.frames):
T.frames=oldFrames
T.onPage=onPage
onPage(canv,doc)
T.onPage=unwrap
h=aH-self.gap
for i,f in enumerate(F):
f=copy(f)
f.height=h
f._reset()
F[i]=f
T.frames=F
G.append(CurrentFrameFlowable(F[0].id))
frame.add_generated_content(*G)
return 0,0
from reportlab.lib.sequencer import _type2formatter
_bulletNames = dict(
bulletchar=u'\u2022', #usually a small circle
circle=u'\u25cf', #circle as high as the font
square=u'\u25a0',
disc=u'\u25cf',
diamond=u'\u25c6',
rarrowhead=u'\u27a4',
)
def _bulletFormat(value,type='1',format=None):
if type=='bullet':
s = _bulletNames.get(value,value)
else:
s = _type2formatter[type](int(value))
if format:
if isinstance(format,str):
s = format % s
elif isinstance(format, collections.Callable):
s = format(s)
else:
raise ValueError('unexpected BulletDrawer format %r' % format)
return s
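# Rough examples of what _bulletFormat returns (assuming the standard formatters in
# reportlab.lib.sequencer._type2formatter):
#   _bulletFormat('3', type='1')                 -> '3'
#   _bulletFormat('3', type='i')                 -> 'iii'
#   _bulletFormat('circle', type='bullet')       -> u'\u25cf'
#   _bulletFormat('2', type='1', format='(%s)')  -> '(2)'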
class BulletDrawer:
def __init__(self,
value='0',
bulletAlign='left',
bulletType='1',
bulletColor='black',
bulletFontName='Helvetica',
bulletFontSize=12,
bulletOffsetY=0,
bulletDedent=0,
bulletDir='ltr',
bulletFormat=None,
):
self.value = value
self._bulletAlign = bulletAlign
self._bulletType = bulletType
self._bulletColor = bulletColor
self._bulletFontName = bulletFontName
self._bulletFontSize = bulletFontSize
self._bulletOffsetY = bulletOffsetY
self._bulletDedent = bulletDedent
self._bulletDir = bulletDir
self._bulletFormat = bulletFormat
def drawOn(self,indenter,canv,x,y,_sW=0):
value = self.value
if not value: return
canv.saveState()
canv.translate(x, y)
y = indenter.height-self._bulletFontSize+self._bulletOffsetY
if self._bulletDir=='rtl':
x = indenter.width - indenter._rightIndent + self._bulletDedent
else:
x = indenter._leftIndent - self._bulletDedent
canv.setFont(self._bulletFontName,self._bulletFontSize)
canv.setFillColor(self._bulletColor)
bulletAlign = self._bulletAlign
value = _bulletFormat(value,self._bulletType,self._bulletFormat)
if bulletAlign=='left':
canv.drawString(x,y,value)
elif bulletAlign=='right':
canv.drawRightString(x,y,value)
elif bulletAlign in ('center','centre'):
canv.drawCentredString(x,y,value)
elif bulletAlign.startswith('numeric') or bulletAlign.startswith('decimal'):
pc = bulletAlign[7:].strip() or '.'
canv.drawAlignedString(x,y,value,pc)
else:
raise ValueError('Invalid bulletAlign: %r' % bulletAlign)
canv.restoreState()
def _computeBulletWidth(b,value):
value = _bulletFormat(value,b._bulletType,b._bulletFormat)
return stringWidth(value,b._bulletFontName,b._bulletFontSize)
class DDIndenter(Flowable):
_IndenterAttrs = '_flowable _leftIndent _rightIndent width height'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0):
self._flowable = flowable
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
DDIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
def wrap(self, aW, aH):
w,h = self._flowable.wrap(aW-self._leftIndent-self._rightIndent, aH)
self.width = w+self._leftIndent+self._rightIndent
self.height = h
return self.width,h
def __getattr__(self,a):
if a in self._IndenterAttrs:
try:
return self.__dict__[a]
except KeyError:
if a not in ('spaceBefore','spaceAfter'):
raise
return getattr(self._flowable,a)
def __setattr__(self,a,v):
if a in self._IndenterAttrs:
self.__dict__[a] = v
else:
setattr(self._flowable,a,v)
def __delattr__(self,a):
if a in self._IndenterAttrs:
del self.__dict__[a]
else:
delattr(self._flowable,a)
def identity(self,maxLen=None):
return '%s containing %s' % (self.__class__.__name__,self._flowable.identity(maxLen))
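# DDIndenter wraps a single flowable, narrowing the available width by the left/right
# indents in wrap()/split() and proxying any other attribute access to the wrapped
# flowable via __getattr__/__setattr__; LIIndenter below adds an optional bullet and
# explicit spaceBefore/spaceAfter handling on top of it.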
class LIIndenter(DDIndenter):
_IndenterAttrs = '_flowable _bullet _leftIndent _rightIndent width height spaceBefore spaceAfter'.split()
def __init__(self,flowable,leftIndent=0,rightIndent=0,bullet=None, spaceBefore=None, spaceAfter=None):
self._flowable = flowable
self._bullet = bullet
self._leftIndent = leftIndent
self._rightIndent = rightIndent
self.width = None
self.height = None
if spaceBefore is not None:
self.spaceBefore = spaceBefore
if spaceAfter is not None:
self.spaceAfter = spaceAfter
def split(self, aW, aH):
S = self._flowable.split(aW-self._leftIndent-self._rightIndent, aH)
return [
LIIndenter(s,
leftIndent=self._leftIndent,
rightIndent=self._rightIndent,
bullet = (s is S[0] and self._bullet or None),
) for s in S
]
def drawOn(self, canv, x, y, _sW=0):
if self._bullet:
self._bullet.drawOn(self,canv,x,y,0)
self._flowable.drawOn(canv,x+self._leftIndent,y,max(0,_sW-self._leftIndent-self._rightIndent))
from reportlab.lib.styles import ListStyle
class ListItem:
def __init__(self,
flowables, #the initial flowables
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
if not isinstance(flowables,(list,tuple)):
flowables = (flowables,)
self._flowables = flowables
params = self._params = {}
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument (%r) not a ListStyle' % (self.__class__.__name__,style))
self._style = style
for k in ListStyle.defaults:
if k in kwds:
v = kwds.get(k)
elif style:
v = getattr(style,k)
else:
continue
params[k] = v
for k in ('value', 'spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
params[k] = v
class _LIParams:
def __init__(self,flowable,params,value,first):
self.flowable = flowable
self.params = params
self.value = value
self.first= first
class ListFlowable(_Container,Flowable):
def __init__(self,
flowables, #the initial flowables
start=1,
style=None,
#leftIndent=18,
#rightIndent=0,
#spaceBefore=None,
#spaceAfter=None,
#bulletType='1',
#bulletColor='black',
#bulletFontName='Helvetica',
#bulletFontSize=12,
#bulletOffsetY=0,
#bulletDedent='auto',
#bulletDir='ltr',
#bulletFormat=None,
**kwds
):
self._flowables = flowables
if style:
if not isinstance(style,ListStyle):
raise ValueError('%s style argument not a ListStyle' % self.__class__.__name__)
self.style = style
for k,v in ListStyle.defaults.items():
setattr(self,'_'+k,kwds.get(k,getattr(style,k,v)))
if start is None:
start = getattr(self,'_start',None)
if start is None:
if getattr(self,'_bulletType','1')=='bullet':
start = 'bulletchar'
else:
start = '1'
self._start = start
for k in ('spaceBefore','spaceAfter'):
v = kwds.get(k,getattr(style,k,None))
if v is not None:
setattr(self,k,v)
self._content = self._getContent()
del self._flowables
self._dims = None
def wrap(self,aW,aH):
if self._dims!=aW:
self.width, self.height = _listWrapOn(self._content,aW,self.canv)
self._dims = aW
return self.width,self.height
def split(self,aW,aH):
return self._content
def _flowablesIter(self):
for f in self._flowables:
if isinstance(f,(list,tuple)):
if f:
for i, z in enumerate(f):
yield i==0 and not isinstance(z,LIIndenter), z
elif isinstance(f,ListItem):
params = f._params
if not params:
                    #simple case: just a list-like object
for i, z in enumerate(f._flowables):
if isinstance(z,LIIndenter):
raise ValueError('LIIndenter not allowed in ListItem')
yield i==0, z
else:
params = params.copy()
value = params.pop('value',None)
spaceBefore = params.pop('spaceBefore',None)
spaceAfter = params.pop('spaceAfter',None)
n = len(f._flowables) - 1
for i, z in enumerate(f._flowables):
P = params.copy()
if not i and spaceBefore is not None:
P['spaceBefore'] = spaceBefore
if i==n and spaceAfter is not None:
P['spaceAfter'] = spaceAfter
if i: value=None
yield 0, _LIParams(z,P,value,i==0)
else:
yield not isinstance(f,LIIndenter), f
def _makeLIIndenter(self,flowable, bullet, params=None):
if params:
leftIndent = params.get('leftIndent',self._leftIndent)
rightIndent = params.get('rightIndent',self._rightIndent)
spaceBefore = params.get('spaceBefore',None)
spaceAfter = params.get('spaceAfter',None)
return LIIndenter(flowable,leftIndent,rightIndent,bullet,spaceBefore=spaceBefore,spaceAfter=spaceAfter)
else:
return LIIndenter(flowable,self._leftIndent,self._rightIndent,bullet)
def _makeBullet(self,value,params=None):
if params is None:
def getp(a):
return getattr(self,'_'+a)
else:
style = getattr(params,'style',None)
def getp(a):
if a in params: return params[a]
                if style and a in style.__dict__: return getattr(style,a)
return getattr(self,'_'+a)
return BulletDrawer(
value=value,
bulletAlign=getp('bulletAlign'),
bulletType=getp('bulletType'),
bulletColor=getp('bulletColor'),
bulletFontName=getp('bulletFontName'),
bulletFontSize=getp('bulletFontSize'),
bulletOffsetY=getp('bulletOffsetY'),
bulletDedent=getp('calcBulletDedent'),
bulletDir=getp('bulletDir'),
bulletFormat=getp('bulletFormat'),
)
def _getContent(self):
value = self._start
bt = self._bulletType
inc = int(bt in '1aAiI')
if inc: value = int(value)
bd = self._bulletDedent
if bd=='auto':
align = self._bulletAlign
dir = self._bulletDir
if dir=='ltr' and align=='left':
bd = self._leftIndent
elif align=='right':
bd = self._rightIndent
else:
#we need to work out the maximum width of any of the labels
tvalue = value
maxW = 0
for d,f in self._flowablesIter():
if d:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType==bt:
maxW = max(maxW,_computeBulletWidth(b,b.value))
tvalue = int(b.value)
else:
maxW = max(maxW,_computeBulletWidth(self,tvalue))
if inc: tvalue += inc
if dir=='ltr':
if align=='right':
bd = self._leftIndent - maxW
else:
bd = self._leftIndent - maxW*0.5
elif align=='left':
bd = self._rightIndent - maxW
else:
bd = self._rightIndent - maxW*0.5
self._calcBulletDedent = bd
S = []
aS = S.append
i=0
for d,f in self._flowablesIter():
fparams = {}
if not i:
i += 1
spaceBefore = getattr(self,'spaceBefore',None)
if spaceBefore is not None:
fparams['spaceBefore'] = spaceBefore
if d:
aS(self._makeLIIndenter(f,bullet=self._makeBullet(value),params=fparams))
if inc: value += inc
elif isinstance(f,LIIndenter):
b = f._bullet
if b:
if b.bulletType!=bt:
raise ValueError('Included LIIndenter bulletType=%s != OrderedList bulletType=%s' % (b.bulletType,bt))
value = int(b.value)
else:
f._bullet = self._makeBullet(value,params=getattr(f,'params',None))
if fparams:
f.__dict__['spaceBefore'] = max(f.__dict__.get('spaceBefore',0),spaceBefore)
aS(f)
if inc: value += inc
elif isinstance(f,_LIParams):
fparams.update(f.params)
z = self._makeLIIndenter(f.flowable,bullet=None,params=fparams)
if f.first:
if f.value is not None:
value = f.value
if inc: value = int(value)
z._bullet = self._makeBullet(value,f.params)
if inc: value += inc
aS(z)
else:
aS(self._makeLIIndenter(f,bullet=None,params=fparams))
spaceAfter = getattr(self,'spaceAfter',None)
if spaceAfter is not None:
f=S[-1]
f.__dict__['spaceAfter'] = max(f.__dict__.get('spaceAfter',0),spaceAfter)
return S
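# Illustrative usage (assumed, not from the original source; 'style' is a paragraph
# style defined elsewhere): a numbered list where one item overrides its counter, e.g.
#   ListFlowable(
#       [Paragraph('first', style),
#        ListItem(Paragraph('tenth', style), value=10),
#        Paragraph('eleventh', style)],
#       bulletType='1')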
class TopPadder(Flowable):
'''wrap a single flowable so that its first bit will be
padded to fill out the space so that it appears at the
bottom of its frame'''
def __init__(self,f):
self.__dict__['_TopPadder__f'] = f
def wrap(self,aW,aH):
w,h = self.__f.wrap(aW,aH)
self.__dict__['_TopPadder__dh'] = aH-h
return w,h
def split(self,aW,aH):
S = self.__f.split(aW,aH)
if len(S)>1:
S[0] = TopPadder(S[0])
return S
def drawOn(self, canvas, x, y, _sW=0):
self.__f.drawOn(canvas,x,y-max(0,self.__dh-1e-8),_sW)
def __setattr__(self,a,v):
setattr(self.__f,a,v)
def __getattr__(self,a):
return getattr(self.__f,a)
def __delattr__(self,a):
delattr(self.__f,a)
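# Illustrative usage (assumed): push a closing block to the bottom of its frame, e.g.
#   story.append(TopPadder(Paragraph('Signed ...', style)))
# wrap() records the unused frame height and drawOn() shifts the wrapped flowable down
# by that amount so it sits at the bottom of the frame.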
class DocAssign(NullDraw):
'''At wrap time this flowable evaluates var=expr in the doctemplate namespace'''
_ZEROSIZE=1
def __init__(self,var,expr,life='forever'):
Flowable.__init__(self)
self.args = var,expr,life
def funcWrap(self,aW,aH):
NS=self._doctemplateAttr('_nameSpace')
NS.update(dict(availableWidth=aW,availableHeight=aH))
try:
return self.func()
finally:
for k in 'availableWidth','availableHeight':
try:
del NS[k]
except:
pass
def func(self):
return self._doctemplateAttr('d'+self.__class__.__name__[1:])(*self.args)
def wrap(self,aW,aH):
self.funcWrap(aW,aH)
return 0,0
class DocExec(DocAssign):
'''at wrap time exec stmt in doc._nameSpace'''
def __init__(self,stmt,lifetime='forever'):
Flowable.__init__(self)
self.args=stmt,lifetime
class DocPara(DocAssign):
    '''At wrap time create a paragraph with the value of expr as text.
    If format is specified it should use %(__expr__)s for string interpolation
    of the expression expr (if any). It may also use %(name)s interpolations
    for other variables in the namespace.
    Suitable defaults will be used if style and klass are None.
    '''
def __init__(self,expr,format=None,style=None,klass=None,escape=True):
Flowable.__init__(self)
self.expr=expr
self.format=format
self.style=style
self.klass=klass
self.escape=escape
def func(self):
expr = self.expr
if expr:
if not isinstance(expr,str): expr = str(expr)
return self._doctemplateAttr('docEval')(expr)
def add_content(self,*args):
self._doctemplateAttr('frame').add_generated_content(*args)
def get_value(self,aW,aH):
value = self.funcWrap(aW,aH)
if self.format:
NS=self._doctemplateAttr('_nameSpace').copy()
NS.update(dict(availableWidth=aW,availableHeight=aH))
NS['__expr__'] = value
value = self.format % NS
else:
value = str(value)
return value
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
P = self.klass
if not P:
from reportlab.platypus.paragraph import Paragraph as P
style = self.style
if not style:
from reportlab.lib.styles import getSampleStyleSheet
style=getSampleStyleSheet()['Code']
if self.escape:
from xml.sax.saxutils import escape
value=escape(value)
self.add_content(P(value,style=style))
return 0,0
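# Illustrative usage (assumed): emit the current page number at wrap time, e.g.
#   story.append(DocPara('doc.page', format='This is page %(__expr__)s'))
# The expression is evaluated with the doctemplate's docEval, so it can also reference
# names assigned earlier with DocAssign/DocExec.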
class DocAssert(DocPara):
def __init__(self,cond,format=None):
Flowable.__init__(self)
self.expr=cond
self.format=format
def funcWrap(self,aW,aH):
self._cond = DocPara.funcWrap(self,aW,aH)
return self._cond
def wrap(self,aW,aH):
value = self.get_value(aW,aH)
if not bool(self._cond):
raise AssertionError(value)
return 0,0
class DocIf(DocPara):
def __init__(self,cond,thenBlock,elseBlock=[]):
Flowable.__init__(self)
self.expr = cond
self.blocks = elseBlock or [],thenBlock
def checkBlock(self,block):
if not isinstance(block,(list,tuple)):
block = (block,)
return block
def wrap(self,aW,aH):
self.add_content(*self.checkBlock(self.blocks[int(bool(self.funcWrap(aW,aH)))]))
return 0,0
class DocWhile(DocIf):
def __init__(self,cond,whileBlock):
Flowable.__init__(self)
self.expr = cond
self.block = self.checkBlock(whileBlock)
def wrap(self,aW,aH):
if bool(self.funcWrap(aW,aH)):
self.add_content(*(list(self.block)+[self]))
return 0,0
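# Illustrative usage (assumed): DocAssign/DocWhile give a simple wrap-time loop, e.g.
#   story.append(DocAssign('i', 3))
#   story.append(DocWhile('i', [DocPara('i'), DocExec('i -= 1')]))
# DocWhile.wrap re-queues its block followed by itself for as long as the condition
# evaluates true.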
class SetTopFlowables(NullDraw):
    _ZEROSIZE = 1
def __init__(self,F,show=False):
self._F = F
self._show = show
def wrap(self,aW,aH):
doc = getattr(getattr(self,'canv',None),'_doctemplate',None)
if doc:
doc._topFlowables=self._F
if self._show and self._F:
doc.frame._generated_content = self._F
return 0,0
| bsd-3-clause | 4,462,128,863,631,193,600 | 35.542585 | 152 | 0.5512 | false | 3.803066 | false | false | false |
blendit/crowd | GUI_crowd_MapPanel.py | 1 | 6603 | import bpy
from bpy.types import Menu, Panel
from bpy.props import *
import os
import sys
import subprocess
import ast
script_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(script_dir)
# Get system's python path
proc = subprocess.Popen('python3 -c "import sys; print(sys.path)"', stdout=subprocess.PIPE, shell=True)
out, err = proc.communicate()
paths = ast.literal_eval(out.decode("utf-8"))
sys.path += (paths)
import blendit.SimulationData as Sim
import pickle as pic
def initSceneProperties(scn):
bpy.types.Scene.PosX = FloatProperty(
name="X",
description="position of the origin")
scn['PosX'] = 0
bpy.types.Scene.PosY = FloatProperty(
name="Y",
description="position of the origin")
scn['PosY'] = 0
bpy.types.Scene.MinX = FloatProperty(
name="Min",
description="Bound of the map")
scn['MinX'] = -float("inf")
bpy.types.Scene.MaxX = FloatProperty(
name="Max",
description="Bound of the map")
scn['MaxX'] = float("inf")
bpy.types.Scene.MinY = FloatProperty(
name="Max",
description="Bound of the map")
scn['MinY'] = -float("inf")
bpy.types.Scene.MaxY = FloatProperty(
name="Max",
description="Bound of the map")
scn['MaxY'] = float("inf")
bpy.types.Scene.GridP = FloatProperty(
name="P",
description="Grid precision",
subtype='PERCENTAGE',
default=100,
min=0,
max=100)
scn['GridP'] = 100
bpy.types.Scene.SelectString = StringProperty(
name="Input",
description="Enter an input file",
subtype='FILE_PATH')
scn['SelectString'] = "filename.py"
bpy.types.Scene.SaveString = StringProperty(
name="Output",
description="Enter an output file",
subtype='FILE_PATH')
scn['SaveString'] = "filename.py"
return
initSceneProperties(bpy.context.scene)
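# The call above registers the custom Scene properties (origin position, map bounds,
# grid precision and input/output file paths) that the Map panels below display and
# the operators read back.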
#
#
# class MapButtonsPanel(Panel):
# bl_category = 'Map'
# bl_space_type = 'VIEW_3D'
# bl_region_type = 'TOOLS'
#
# def draw(self, context):
# layout = self.layout
# scn = context.scene
class InputFile_Tools(Panel):
bl_label = "Input File"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.prop(scn, 'SelectString')
layout.operator("env.select")
layout.prop(scn, 'SaveString')
layout.operator("env.save")
class MapOrigin_Tools(Panel):
bl_label = "Map Origin"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
# COMPAT_ENGINES = {'BLENDER_RENDER'}
def draw(self, context):
layout = self.layout
scn = context.scene
layout.label(text="Origin Position:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'PosX')
row.prop(scn, 'PosY')
layout.operator("env.origin")
layout.operator("env.set")
class MapSize_Tools(Panel):
bl_label = "Map Bounds"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.label(text="X bounds:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'MinX', text="Min")
row.prop(scn, 'MaxX', text="Max")
layout.label(text="Y bounds:")
row = layout.row(align=True)
row.alignment = 'EXPAND'
row.prop(scn, 'MinY', text="Min")
row.prop(scn, 'MaxY', text="Max")
layout.operator("env.size")
class GridSize_Tools (Panel):
bl_label = "Grid Size"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.prop(scn, 'GridP')
layout.operator("env.grid")
class Generate_Tools (Panel):
bl_label = "Generate Map"
bl_category = "Map"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
def draw(self, context):
layout = self.layout
scn = context.scene
layout.operator("env.generate")
class MapSelectButton(bpy.types.Operator):
bl_idname = "env.select"
bl_label = "Set input as configuration"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        ic = open(scn.SelectString, "rb")
Sim.graph = pic.load(ic)
ic.close()
return{'FINISHED'}
class MapSaveButton(bpy.types.Operator):
bl_idname = "env.save"
bl_label = "Save configuration"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        oc = open(scn.SaveString, "wb")
pic.dump(Sim.graph, oc)
oc.close()
return{'FINISHED'}
class MapOriginCursorButton(bpy.types.Operator):
bl_idname = "env.origin"
bl_label = "From cursor"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
Pcursor = view.cursor_location
bpy.context.scene.PosX = Pcursor[0]
bpy.context.scene.PosY = Pcursor[1]
scn.cursor_location = (scn.PosX, scn.PosY, 0)
return{'FINISHED'}
class MapOriginButton(bpy.types.Operator):
bl_idname = "env.set"
bl_label = "Set map origin"
def execute(self, context):
scn = bpy.context.scene
view = bpy.context.space_data
        Sim.OriginX = scn.PosX
        Sim.OriginY = scn.PosY
return{'FINISHED'}
class MapSizeButton(bpy.types.Operator):
bl_idname = "env.size"
bl_label = "Set map size"
def execute(self, context):
scn = bpy.context.scene
        Sim.MinX = scn.MinX
        Sim.MaxX = scn.MaxX
        Sim.MinY = scn.MinY
        Sim.MaxY = scn.MaxY
return{'FINISHED'}
class MapGridButton(bpy.types.Operator):
bl_idname = "env.grid"
bl_label = "Set Grid size"
def execute(self, context):
scn = bpy.context.scene
        coefficient = 5 - (scn.GridP / 20)
Sim.Grid = Sim.MinGrid * (10 ** coefficient)
return{'FINISHED'}
class MapGenerationButton(bpy.types.Operator):
bl_idname = "env.generate"
bl_label = "Generate"
def execute(self, context):
scn = bpy.context.scene
Sim.renew_graph()
return{'FINISHED'}
bpy.utils.register_module(__name__)
| gpl-3.0 | 137,303,051,667,729,950 | 25.625 | 103 | 0.593518 | false | 3.394859 | false | false | false |
stefantkeller/VECSELsetup | exp/eval/light_light.py | 1 | 8684 | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import errorvalues as ev # github.com/stefantkeller/errorvalues
from VECSELsetup.eval.varycolor import varycolor
from VECSELsetup.eval.gen_functions import load, extract, plotinstructions_write, plotinstructions_read, lut_from_calibfolder, lut_interp_from_calibfolder, thermal_resistance
def main():
# before running this script:
# run eval_spectrum.py to provide the .._eval.csv files required for the spectra
# run calibration.py (with appropriate calib measurements)
# and don't forget temperature_heatsink (this is not necessary for this script here, but it provides interesting insights for the measurement at hand)
logfile = '../24_LL_ev/20150211_sample21-1-d6/spot333um.csv'
calib_folder = '../24_LL_ev/20150204_calib_333um_s21-1-d6'
#------------------------------------
# calibration
emis_lut = lut_from_calibfolder(calib_folder,identifiers=['Laser'],ignore_error=False) # emission has constant value solely due to BS, no ND in front of detector etc.
pump_lut, refl_lut = lut_interp_from_calibfolder(calib_folder,identifiers=['Pump','Refl'])
#------------------------------------
# load measurement
current_set, current, pump, refl, laser, spectra, meantemp = extract(logfile, identifiers=['Current','Pump','Refl','Laser','Spectra', 'Temperature'])
Temperatures = sorted(current_set.keys()) # set temperatures (round numbers like 15.0 or 22.5 etc)
T_out = dict((T,meantemp[T].round(1)) for T in Temperatures) # real temperatures for display in plot, including +-uncertainty
#------------------------------------
# calculate using calibration
absorbed, reflected, emitted, pumped, dissipated = {}, {}, {}, {}, {}
for T in Temperatures:
reflected[T] = refl_lut(refl[T])
pumped[T] = pump_lut(pump[T])
absorbed[T] = pumped[T] - reflected[T]
emitted[T] = emis_lut(laser[T])
dissipated[T] = absorbed[T] - emitted[T]
#
#------------------------------------
# invoke instructions for plot and fit
# plotting the data can be tricky to reproduce, store the plot properties in a text file and read from there!
# (easy to repeat the plot at a later time)
# open the instruction file in a text editor, edit the instructions and run this module again; it will use the new instructions
instrfile = logfile[:-4]+'_instr.csv'
plotinstructions_write(instrfile,Temperatures,calib_folder)
#------------------------------------
# retrieve instructions
instr = plotinstructions_read(instrfile)
#
#------------------------------------
# translate instructions
str2lst = lambda s: map(float,s[1:-1].split(','))
textx = float(instr['textx']) # x coordinate for text; same for first two subplots (absorbed-emitted and absorbed-reflectivity)
fontsize = float(instr['fontsize'])
title = instr['title']
xlim = str2lst(instr['xlim']) # range of x-axis; same for first two subplots
ylim1 = str2lst(instr['ylim1']) # range of y-axis of first (aborbed-emitted) plot
ylim2 = str2lst(instr['ylim2']) # range of second y-axis (absorbed-reflectivity)
xlim3 = str2lst(instr['xlim3']) # third x-axis; (dissipated-wavelength)
ylim3 = str2lst(instr['ylim3']) # 3rd y-axis
plot_temps_for_3 = str2lst(instr['plot_temps_for_3']) # which ones to plot? you may have measured a heat sink temperature without lasing output, whose data will confuse the reader, so you don't plot it.
textx3 = float(instr['textx3']) # x-coordinate of text in 3rd plot
texty3 = str2lst(instr['texty3']) # 3rd y-coordinate
llow0 = {}
lhigh0 = {}
texty1 = {}
for T in Temperatures:
llow0[T] = sum(absorbed[T].v()<float(instr['llow0[{0}]'.format(T)])) # index indicating start of lasing activity
lhigh0[T] = sum(absorbed[T].v()<float(instr['lhigh0[{0}]'.format(T)])) # index corresponding to where linear segment stops
texty1[T] = float(instr['texty1[{0}]'.format(T)])
#
#
#------------------------------------
#------------------------------------
# plot
cols = varycolor(3*len(Temperatures))
plt.subplot(3,1,1)
cnt = 0 # color counter
q0,m0 = {},{} # for linreg
for T in Temperatures:
# linreg
q0[T],m0[T] = ev.linreg(absorbed[T].v()[llow0[T]:lhigh0[T]],
emitted[T].v()[llow0[T]:lhigh0[T]],
emitted[T].e()[llow0[T]:lhigh0[T]],
overwrite_zeroerrors=True)
emax,emaxi = ev.max(emitted[T],True)
amax = absorbed[T][emaxi]
print 'Max emission at ({}) degC at ({}) W absorbed power: ({}) W'.format(T_out[T],amax,emax)
# plot
plt.errorbar(absorbed[T].v(),emitted[T].v(),
xerr=absorbed[T].e(),yerr=emitted[T].e(),
c=cols[cnt],linestyle=' ')
plt.plot(absorbed[T].v(),m0[T].v()*absorbed[T].v()+q0[T].v(),c=cols[cnt+1])
plt.text(textx,texty1[T],
'${0}$$^\circ$C: ${1}$ %'.format(T_out[T],m0[T].round(3)*100),
color=cols[cnt],fontsize=fontsize)
cnt+=3
plt.title(title)
plt.xlabel('Absorbed power (W)')
    plt.ylabel('Emitted power (W)')
plt.xlim(xlim)
plt.ylim(ylim1)
plt.grid('on')
#plt.show()
#------------------------------------
plt.subplot(3,1,2)
cnt = 0 # reset color counter
q1,m1 = {},{}
for T in Temperatures:
relref = reflected[T]/pumped[T]*100
# plot
plt.errorbar(absorbed[T].v(),relref.v(),
xerr=absorbed[T].e(),yerr=relref.e(),
c=cols[cnt],linestyle=' ')
cnt+=3
plt.title(title)
plt.xlabel('Absorbed power (W)')
plt.ylabel('Reflectivity (%)')
plt.xlim(xlim)
plt.ylim(ylim2)
plt.grid('on')
#plt.show()
#------------------------------------
# plot dissipation and spectra
plt.subplot(3,1,3)
cnt = 0 # reset
q3,m3 = {},{}
for T in Temperatures:
if T in plot_temps_for_3:
# lambda_short
#plt.errorbar(dissipated[T].v(),spectra[T][0].v(),
# xerr=dissipated[T].e(),yerr=spectra[T][0].e(),
# c=cols[cnt],linestyle=' ')
# lambda_long
# lin reg for range that lases (>threshold, <roll over), hence instr from subplot 1
q3[T],m3[T] = ev.linreg(dissipated[T].v()[llow0[T]:lhigh0[T]],
spectra[T][1].v()[llow0[T]:lhigh0[T]],
spectra[T][1].e()[llow0[T]:lhigh0[T]],
overwrite_zeroerrors=True)
# show only a part, not to confuse reader
#plt.errorbar(dissipated[T].v()[llow0[T]:lhigh0[T]],spectra[T][1].v()[llow0[T]:lhigh0[T]],
# xerr=dissipated[T].e()[llow0[T]:lhigh0[T]],yerr=spectra[T][1].e()[llow0[T]:lhigh0[T]],
# c=cols[cnt],linestyle=' ')
# show the whole range
plt.errorbar(dissipated[T].v(),spectra[T][1].v(),
xerr=dissipated[T].e(),yerr=spectra[T][1].e(),
c=cols[cnt],linestyle=' ')
cnt += 3
plt.title(title)
plt.xlim(xlim3)
plt.ylim(ylim3)
plt.xlim()
plt.xlabel('Dissipated power (W)')
plt.ylabel('Wavelength (nm)')
plt.grid('on')
cnt = 0 # reset
wavelength = ev.errvallist([q3[T] for T in plot_temps_for_3]) # wavelength offsets
slopes = ev.errvallist([m3[T] for T in plot_temps_for_3]) # slopes
T_active = ev.errvallist([T_out[T] for T in plot_temps_for_3])
dldD, dldT, l0 = thermal_resistance(T_active,wavelength,slopes) #, R_th
R_th = dldD/dldT
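    # R_th is read off the fitted plane lambda = l0 + dldT*T_hs + dldD*D:
    # (dlambda/dD)/(dlambda/dT) has units of K/W and is quoted below as the thermal impedance.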
for T in Temperatures:
if T in plot_temps_for_3:
plt.plot(dissipated[T].v(),l0.v() + dldT.v()*T_out[T].v() + dldD.v()*dissipated[T].v(),c=cols[cnt+1])
cnt+=3
plt.text(textx3,texty3[0],
'$\lambda=$'+'$({})$'.format(dldT.round(3))+'$T_{hs}+$'+'$({})$'.format(dldD.round(3))+'$D+$'+'${}$'.format(l0.round(3)),
color='k')
R_th = R_th.round(2)
therm_imp = 'Thermal impedance: $({0})$ K/W'.format(R_th)
plt.text(textx3,texty3[1],
therm_imp,color='k')
print therm_imp
for T in Temperatures:
print meantemp[T]
plt.show()
if __name__ == "__main__":
main()
| mit | 7,744,530,179,776,189,000 | 37.767857 | 206 | 0.551474 | false | 3.202065 | false | false | false |
jenisys/behave | examples/async_step/features/environment.py | 2 | 1530 | # -*- coding: UTF-8 -*-
from behave.tag_matcher import ActiveTagMatcher, setup_active_tag_values
from behave.api.runtime_constraint import require_min_python_version
from behave import python_feature
# -----------------------------------------------------------------------------
# REQUIRE: python >= 3.4
# -----------------------------------------------------------------------------
require_min_python_version("3.4")
# -----------------------------------------------------------------------------
# SUPPORT: Active-tags
# -----------------------------------------------------------------------------
# -- MATCHES ANY TAGS: @use.with_{category}={value}
# NOTE: active_tag_value_provider provides category values for active tags.
active_tag_value_provider = python_feature.ACTIVE_TAG_VALUE_PROVIDER.copy()
active_tag_matcher = ActiveTagMatcher(active_tag_value_provider)
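# Example (hypothetical tag): a feature or scenario tagged @use.with_python.version=3.5
# (any @use.with_{category}={value} tag whose category appears in
# active_tag_value_provider) is skipped by the hooks below when the current value
# does not match.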
# -----------------------------------------------------------------------------
# HOOKS:
# -----------------------------------------------------------------------------
def before_all(ctx):
# -- SETUP ACTIVE-TAG MATCHER (with userdata):
setup_active_tag_values(active_tag_value_provider, ctx.config.userdata)
def before_feature(ctx, feature):
if active_tag_matcher.should_exclude_with(feature.tags):
feature.skip(reason=active_tag_matcher.exclude_reason)
def before_scenario(ctx, scenario):
if active_tag_matcher.should_exclude_with(scenario.effective_tags):
scenario.skip(reason=active_tag_matcher.exclude_reason)
| bsd-2-clause | -7,307,714,831,485,707,000 | 38.230769 | 79 | 0.515033 | false | 4.608434 | false | false | false |