repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
jallohm/django | tests/template_tests/templatetags/custom.py | 152 | 5394 | import operator
import warnings
from django import template
from django.template.defaultfilters import stringfilter
from django.utils import six
from django.utils.html import escape, format_html
register = template.Library()
@register.filter
@stringfilter
def trim(value, num):
return value[:num]
@register.filter
def noop(value, param=None):
"""A noop filter that always return its first argument and does nothing with
its second (optional) one.
Useful for testing out whitespace in filter arguments (see #19882)."""
return value
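# Illustrative usage sketch (editor's addition, not part of the original test
# module): calling the filters directly mirrors the template expressions
# {{ "abcdef"|trim:3 }} and {{ "abcdef"|noop:" arg with spaces " }}.
def _demo_trim_and_noop():
    assert trim("abcdef", 3) == "abc"
    assert noop("abcdef", " arg with spaces ") == "abcdef"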
@register.simple_tag(takes_context=True)
def context_stack_length(context):
return len(context.dicts)
@register.simple_tag
def no_params():
"""Expected no_params __doc__"""
return "no_params - Expected result"
no_params.anything = "Expected no_params __dict__"
@register.simple_tag
def one_param(arg):
"""Expected one_param __doc__"""
return "one_param - Expected result: %s" % arg
one_param.anything = "Expected one_param __dict__"
@register.simple_tag(takes_context=False)
def explicit_no_context(arg):
"""Expected explicit_no_context __doc__"""
return "explicit_no_context - Expected result: %s" % arg
explicit_no_context.anything = "Expected explicit_no_context __dict__"
@register.simple_tag(takes_context=True)
def no_params_with_context(context):
"""Expected no_params_with_context __doc__"""
return "no_params_with_context - Expected result (context value: %s)" % context['value']
no_params_with_context.anything = "Expected no_params_with_context __dict__"
@register.simple_tag(takes_context=True)
def params_and_context(context, arg):
"""Expected params_and_context __doc__"""
return "params_and_context - Expected result (context value: %s): %s" % (context['value'], arg)
params_and_context.anything = "Expected params_and_context __dict__"
@register.simple_tag
def simple_two_params(one, two):
"""Expected simple_two_params __doc__"""
return "simple_two_params - Expected result: %s, %s" % (one, two)
simple_two_params.anything = "Expected simple_two_params __dict__"
@register.simple_tag
def simple_one_default(one, two='hi'):
"""Expected simple_one_default __doc__"""
return "simple_one_default - Expected result: %s, %s" % (one, two)
simple_one_default.anything = "Expected simple_one_default __dict__"
@register.simple_tag
def simple_unlimited_args(one, two='hi', *args):
"""Expected simple_unlimited_args __doc__"""
return "simple_unlimited_args - Expected result: %s" % (', '.join(six.text_type(arg) for arg in [one, two] + list(args)))
simple_unlimited_args.anything = "Expected simple_unlimited_args __dict__"
@register.simple_tag
def simple_only_unlimited_args(*args):
"""Expected simple_only_unlimited_args __doc__"""
return "simple_only_unlimited_args - Expected result: %s" % ', '.join(six.text_type(arg) for arg in args)
simple_only_unlimited_args.anything = "Expected simple_only_unlimited_args __dict__"
@register.simple_tag
def simple_unlimited_args_kwargs(one, two='hi', *args, **kwargs):
"""Expected simple_unlimited_args_kwargs __doc__"""
# Sort the dictionary by key to guarantee the order for testing.
sorted_kwarg = sorted(six.iteritems(kwargs), key=operator.itemgetter(0))
return "simple_unlimited_args_kwargs - Expected result: %s / %s" % (
', '.join(six.text_type(arg) for arg in [one, two] + list(args)),
', '.join('%s=%s' % (k, v) for (k, v) in sorted_kwarg)
)
simple_unlimited_args_kwargs.anything = "Expected simple_unlimited_args_kwargs __dict__"
@register.simple_tag(takes_context=True)
def simple_tag_without_context_parameter(arg):
"""Expected simple_tag_without_context_parameter __doc__"""
return "Expected result"
simple_tag_without_context_parameter.anything = "Expected simple_tag_without_context_parameter __dict__"
@register.simple_tag(takes_context=True)
def escape_naive(context):
"""A tag that doesn't even think about escaping issues"""
return "Hello {0}!".format(context['name'])
@register.simple_tag(takes_context=True)
def escape_explicit(context):
"""A tag that uses escape explicitly"""
return escape("Hello {0}!".format(context['name']))
@register.simple_tag(takes_context=True)
def escape_format_html(context):
"""A tag that uses format_html"""
return format_html("Hello {0}!", context['name'])
@register.simple_tag(takes_context=True)
def current_app(context):
return "%s" % context.current_app
@register.simple_tag(takes_context=True)
def use_l10n(context):
return "%s" % context.use_l10n
@register.simple_tag(name='minustwo')
def minustwo_overridden_name(value):
return value - 2
register.simple_tag(lambda x: x - 1, name='minusone')
with warnings.catch_warnings():
warnings.simplefilter('ignore')
@register.assignment_tag
def assignment_no_params():
"""Expected assignment_no_params __doc__"""
return "assignment_no_params - Expected result"
assignment_no_params.anything = "Expected assignment_no_params __dict__"
@register.assignment_tag(takes_context=True)
def assignment_tag_without_context_parameter(arg):
"""Expected assignment_tag_without_context_parameter __doc__"""
return "Expected result"
assignment_tag_without_context_parameter.anything = "Expected assignment_tag_without_context_parameter __dict__"
| bsd-3-clause |
rasata/pypes | ui/pypesvds/plugins/splitoperator/splitoperator.py | 5 | 1061 | import logging
#import traceback
from pypes.component import Component
log = logging.getLogger(__name__)
class Split(Component):
__metatype__ = 'OPERATOR'
def __init__(self):
# initialize parent class
Component.__init__(self)
self.add_output('out2', 'Second Output Port')
log.info('Component Initialized: %s' % self.__class__.__name__)
def run(self):
# Define our component's entry point
while True:
# for each document waiting on our input port
for doc in self.receive_all('in'):
try:
cloned = doc.clone()
self.send('out', doc)
self.send('out2', cloned)
except Exception as e:
log.error('Component Failed: %s' % self.__class__.__name__)
log.error('Reason: %s' % str(e))
#log.error(traceback.print_exc())
# yield the CPU, allowing another component to run
self.yield_ctrl()
| apache-2.0 |
Mj258/weiboapi | srapyDemo/envs/Lib/site-packages/twisted/python/util.py | 2 | 31276 | # -*- test-case-name: twisted.python.test.test_util -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
from __future__ import division, absolute_import, print_function
import os, sys, errno, warnings
try:
import pwd, grp
except ImportError:
pwd = grp = None
try:
from os import setgroups, getgroups
except ImportError:
setgroups = getgroups = None
from twisted.python.compat import _PY3, unicode
if _PY3:
UserDict = object
else:
from UserDict import UserDict
class InsensitiveDict:
"""Dictionary, that has case-insensitive keys.
Normally keys are retained in their original form when queried with
.keys() or .items(). If initialized with preserveCase=0, keys are both
looked up in lowercase and returned in lowercase by .keys() and .items().
"""
"""
Modified recipe at
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66315 originally
contributed by Sami Hangaslammi.
"""
def __init__(self, dict=None, preserve=1):
"""Create an empty dictionary, or update from 'dict'."""
self.data = {}
self.preserve=preserve
if dict:
self.update(dict)
def __delitem__(self, key):
k=self._lowerOrReturn(key)
del self.data[k]
def _lowerOrReturn(self, key):
if isinstance(key, bytes) or isinstance(key, unicode):
return key.lower()
else:
return key
def __getitem__(self, key):
"""Retrieve the value associated with 'key' (in any case)."""
k = self._lowerOrReturn(key)
return self.data[k][1]
def __setitem__(self, key, value):
"""Associate 'value' with 'key'. If 'key' already exists, but
in different case, it will be replaced."""
k = self._lowerOrReturn(key)
self.data[k] = (key, value)
def has_key(self, key):
"""Case insensitive test whether 'key' exists."""
k = self._lowerOrReturn(key)
return k in self.data
__contains__ = has_key
def _doPreserve(self, key):
if not self.preserve and (isinstance(key, bytes)
or isinstance(key, unicode)):
return key.lower()
else:
return key
def keys(self):
"""List of keys in their original case."""
return list(self.iterkeys())
def values(self):
"""List of values."""
return list(self.itervalues())
def items(self):
"""List of (key,value) pairs."""
return list(self.iteritems())
def get(self, key, default=None):
"""Retrieve value associated with 'key' or return default value
if 'key' doesn't exist."""
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default):
"""If 'key' doesn't exists, associate it with the 'default' value.
Return value associated with 'key'."""
if not self.has_key(key):
self[key] = default
return self[key]
def update(self, dict):
"""Copy (key,value) pairs from 'dict'."""
for k,v in dict.items():
self[k] = v
def __repr__(self):
"""String representation of the dictionary."""
items = ", ".join([("%r: %r" % (k,v)) for k,v in self.items()])
return "InsensitiveDict({%s})" % items
def iterkeys(self):
for v in self.data.values():
yield self._doPreserve(v[0])
def itervalues(self):
for v in self.data.values():
yield v[1]
def iteritems(self):
for (k, v) in self.data.values():
yield self._doPreserve(k), v
def popitem(self):
i=self.items()[0]
del self[i[0]]
return i
def clear(self):
for k in self.keys():
del self[k]
def copy(self):
return InsensitiveDict(self, self.preserve)
def __len__(self):
return len(self.data)
def __eq__(self, other):
for k,v in self.items():
if not (k in other) or not (other[k]==v):
return 0
return len(self)==len(other)
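# Illustrative usage sketch (editor's addition): lookups ignore case while the
# original key casing is kept, unless preserve=0 is passed.
def _demo_insensitive_dict():
    d = InsensitiveDict({'Content-Type': 'text/plain'})
    assert d['content-type'] == 'text/plain'      # case-insensitive lookup
    assert list(d.keys()) == ['Content-Type']     # original casing retained
    lower = InsensitiveDict({'Content-Type': 'text/plain'}, preserve=0)
    assert list(lower.keys()) == ['content-type']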
class OrderedDict(UserDict):
"""A UserDict that preserves insert order whenever possible."""
def __init__(self, dict=None, **kwargs):
self._order = []
self.data = {}
if dict is not None:
if hasattr(dict,'keys'):
self.update(dict)
else:
for k,v in dict: # sequence
self[k] = v
if len(kwargs):
self.update(kwargs)
def __repr__(self):
return '{'+', '.join([('%r: %r' % item) for item in self.items()])+'}'
def __setitem__(self, key, value):
if not self.has_key(key):
self._order.append(key)
UserDict.__setitem__(self, key, value)
def copy(self):
return self.__class__(self)
def __delitem__(self, key):
UserDict.__delitem__(self, key)
self._order.remove(key)
def iteritems(self):
for item in self._order:
yield (item, self[item])
def items(self):
return list(self.iteritems())
def itervalues(self):
for item in self._order:
yield self[item]
def values(self):
return list(self.itervalues())
def iterkeys(self):
return iter(self._order)
def keys(self):
return list(self._order)
def popitem(self):
key = self._order[-1]
value = self[key]
del self[key]
return (key, value)
def setdefault(self, item, default):
if self.has_key(item):
return self[item]
self[item] = default
return default
def update(self, d):
for k, v in d.items():
self[k] = v
if _PY3:
# Python 3 has its own OrderedDict that we should use instead.
del OrderedDict
from collections import OrderedDict
def uniquify(lst):
"""Make the elements of a list unique by inserting them into a dictionary.
This must not change the order of the input lst.
"""
dct = {}
result = []
for k in lst:
if k not in dct:
result.append(k)
dct[k] = 1
return result
def padTo(n, seq, default=None):
"""
Pads a sequence out to n elements,
filling in with a default value if it is not long enough.
If the input sequence is longer than n, raises ValueError.
Details, details:
This returns a new list; it does not extend the original sequence.
The new list contains the values of the original sequence, not copies.
"""
if len(seq) > n:
raise ValueError("%d elements is more than %d." % (len(seq), n))
blank = [default] * n
blank[:len(seq)] = list(seq)
return blank
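# Illustrative usage sketch (editor's addition): padTo only extends, never
# truncates, and rejects sequences longer than n.
def _demo_padTo():
    assert padTo(4, [1, 2]) == [1, 2, None, None]
    assert padTo(3, (1, 2, 3), default=0) == [1, 2, 3]
    try:
        padTo(1, [1, 2])
    except ValueError:
        pass  # more elements than n is an error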
def getPluginDirs():
warnings.warn(
"twisted.python.util.getPluginDirs is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
import twisted
systemPlugins = os.path.join(os.path.dirname(os.path.dirname(
os.path.abspath(twisted.__file__))), 'plugins')
userPlugins = os.path.expanduser("~/TwistedPlugins")
confPlugins = os.path.expanduser("~/.twisted")
allPlugins = filter(os.path.isdir, [systemPlugins, userPlugins, confPlugins])
return allPlugins
def addPluginDir():
warnings.warn(
"twisted.python.util.addPluginDir is deprecated since Twisted 12.2.",
DeprecationWarning, stacklevel=2)
sys.path.extend(getPluginDirs())
def sibpath(path, sibling):
"""
Return the path to a sibling of a file in the filesystem.
This is useful in conjunction with the special C{__file__} attribute
that Python provides for modules, so modules can load associated
resource files.
"""
return os.path.join(os.path.dirname(os.path.abspath(path)), sibling)
def _getpass(prompt):
"""
Helper to turn IOErrors into KeyboardInterrupts.
"""
import getpass
try:
return getpass.getpass(prompt)
except IOError as e:
if e.errno == errno.EINTR:
raise KeyboardInterrupt
raise
except EOFError:
raise KeyboardInterrupt
def getPassword(prompt = 'Password: ', confirm = 0, forceTTY = 0,
confirmPrompt = 'Confirm password: ',
mismatchMessage = "Passwords don't match."):
"""Obtain a password by prompting or from stdin.
If stdin is a terminal, prompt for a new password, and confirm (if
C{confirm} is true) by asking again to make sure the user typed the same
thing, as keystrokes will not be echoed.
If stdin is not a terminal, and C{forceTTY} is not true, read in a line
and use it as the password, less the trailing newline, if any. If
C{forceTTY} is true, attempt to open a tty and prompt for the password
using it. Raise a RuntimeError if this is not possible.
@returns: C{str}
"""
isaTTY = hasattr(sys.stdin, 'isatty') and sys.stdin.isatty()
old = None
try:
if not isaTTY:
if forceTTY:
try:
old = sys.stdin, sys.stdout
sys.stdin = sys.stdout = open('/dev/tty', 'r+')
except:
raise RuntimeError("Cannot obtain a TTY")
else:
password = sys.stdin.readline()
if password[-1] == '\n':
password = password[:-1]
return password
while 1:
try1 = _getpass(prompt)
if not confirm:
return try1
try2 = _getpass(confirmPrompt)
if try1 == try2:
return try1
else:
sys.stderr.write(mismatchMessage + "\n")
finally:
if old:
sys.stdin.close()
sys.stdin, sys.stdout = old
def println(*a):
sys.stdout.write(' '.join(map(str, a))+'\n')
# XXX
# This does not belong here
# But where does it belong?
def str_xor(s, b):
return ''.join([chr(ord(c) ^ b) for c in s])
def makeStatBar(width, maxPosition, doneChar = '=', undoneChar = '-', currentChar = '>'):
"""
Creates a function that will return a string representing a progress bar.
"""
aValue = width / float(maxPosition)
def statBar(position, force = 0, last = ['']):
assert len(last) == 1, "Don't mess with the last parameter."
done = int(aValue * position)
toDo = width - done - 2
result = "[%s%s%s]" % (doneChar * done, currentChar, undoneChar * toDo)
if force:
last[0] = result
return result
if result == last[0]:
return ''
last[0] = result
return result
statBar.__doc__ = """statBar(position, force = 0) -> '[%s%s%s]'-style progress bar
returned string is %d characters long, and the range goes from 0..%d.
The 'position' argument is where the '%s' will be drawn. If force is false,
'' will be returned instead if the resulting progress bar is identical to the
previously returned progress bar.
""" % (doneChar * 3, currentChar, undoneChar * 3, width, maxPosition, currentChar)
return statBar
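# Illustrative usage sketch (editor's addition, Python 2 only: makeStatBar is
# not ported to Python 3 in this module): a 10-character bar over a 0..100
# range.
def _demo_makeStatBar():
    bar = makeStatBar(10, 100)
    print(bar(0, force=1))   # '[>--------]'
    print(bar(50))           # '[=====>---]'
    print(bar(50))           # '' - unchanged since the previous call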
def spewer(frame, s, ignored):
"""
A trace function for sys.settrace that prints every function or method call.
"""
from twisted.python import reflect
if 'self' in frame.f_locals:
se = frame.f_locals['self']
if hasattr(se, '__class__'):
k = reflect.qual(se.__class__)
else:
k = reflect.qual(type(se))
print('method %s of %s at %s' % (
frame.f_code.co_name, k, id(se)))
else:
print('function %s in %s, line %s' % (
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_lineno))
def searchupwards(start, files=[], dirs=[]):
"""
Walk upwards from start, looking for a directory containing
all files and directories given as arguments::
>>> searchupwards('.', ['foo.txt'], ['bar', 'bam'])
If not found, return None
"""
start=os.path.abspath(start)
parents=start.split(os.sep)
exists=os.path.exists; join=os.sep.join; isdir=os.path.isdir
while len(parents):
candidate=join(parents)+os.sep
allpresent=1
for f in files:
if not exists("%s%s" % (candidate, f)):
allpresent=0
break
if allpresent:
for d in dirs:
if not isdir("%s%s" % (candidate, d)):
allpresent=0
break
if allpresent: return candidate
parents.pop(-1)
return None
class LineLog:
"""
A limited-size line-based log, useful for logging line-based
protocols such as SMTP.
When the log fills up, old entries drop off the end.
"""
def __init__(self, size=10):
"""
Create a new log, with size lines of storage (default 10).
A log size of 0 (or less) means an infinite log.
"""
if size < 0:
size = 0
self.log = [None]*size
self.size = size
def append(self,line):
if self.size:
self.log[:-1] = self.log[1:]
self.log[-1] = line
else:
self.log.append(line)
def str(self):
return '\n'.join(filter(None,self.log))
def __getitem__(self, item):
return filter(None,self.log)[item]
def clear(self):
"""Empty the log"""
self.log = [None]*self.size
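# Illustrative usage sketch (editor's addition, Python 2 only: LineLog is not
# ported to Python 3 in this module): a three-line log drops the oldest entry.
def _demo_LineLog():
    log = LineLog(3)
    for line in ('HELO', 'MAIL FROM', 'RCPT TO', 'DATA'):
        log.append(line)
    assert log.str() == 'MAIL FROM\nRCPT TO\nDATA'  # 'HELO' has dropped off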
def raises(exception, f, *args, **kwargs):
"""
Determine whether the given call raises the given exception.
"""
try:
f(*args, **kwargs)
except exception:
return 1
return 0
class IntervalDifferential(object):
"""
Given a list of intervals, generate the amount of time to sleep between
"instants".
For example, given 7, 11 and 13, the three (infinite) sequences::
7 14 21 28 35 ...
11 22 33 44 ...
13 26 39 52 ...
will be generated, merged, and used to produce::
(7, 0) (4, 1) (2, 2) (1, 0) (7, 0) (1, 1) (4, 2) (2, 0) (5, 1) (2, 0)
New intervals may be added or removed as iteration proceeds using the
proper methods.
"""
def __init__(self, intervals, default=60):
"""
@type intervals: C{list} of C{int}, C{long}, or C{float}
@param intervals: The intervals between instants.
@type default: C{int}, C{long}, or C{float}
@param default: The duration to generate if the intervals list
becomes empty.
"""
self.intervals = intervals[:]
self.default = default
def __iter__(self):
return _IntervalDifferentialIterator(self.intervals, self.default)
class _IntervalDifferentialIterator(object):
def __init__(self, i, d):
self.intervals = [[e, e, n] for (e, n) in zip(i, range(len(i)))]
self.default = d
self.last = 0
def __next__(self):
if not self.intervals:
return (self.default, None)
last, index = self.intervals[0][0], self.intervals[0][2]
self.intervals[0][0] += self.intervals[0][1]
self.intervals.sort()
result = last - self.last
self.last = last
return result, index
# Iterators on Python 2 use next(), not __next__()
next = __next__
def addInterval(self, i):
if self.intervals:
delay = self.intervals[0][0] - self.intervals[0][1]
self.intervals.append([delay + i, i, len(self.intervals)])
self.intervals.sort()
else:
self.intervals.append([i, i, 0])
def removeInterval(self, interval):
for i in range(len(self.intervals)):
if self.intervals[i][1] == interval:
index = self.intervals[i][2]
del self.intervals[i]
for i in self.intervals:
if i[2] > index:
i[2] -= 1
return
raise ValueError("Specified interval not in IntervalDifferential")
class FancyStrMixin:
"""
Mixin providing a flexible implementation of C{__str__}.
C{__str__} output will begin with the name of the class, or the contents
of the attribute C{fancybasename} if it is set.
The body of C{__str__} can be controlled by overriding C{showAttributes} in
a subclass. Set C{showAttributes} to a sequence of strings naming
attributes, or sequences of C{(attributeName, callable)}, or sequences of
C{(attributeName, displayName, formatCharacter)}. In the second case, the
callable is passed the value of the attribute and its return value used in
the output of C{__str__}. In the final case, the attribute is looked up
using C{attributeName}, but the output uses C{displayName} instead, and
renders the value of the attribute using C{formatCharacter}, e.g. C{"%.3f"}
might be used for a float.
"""
# Override in subclasses:
showAttributes = ()
def __str__(self):
r = ['<', (hasattr(self, 'fancybasename') and self.fancybasename)
or self.__class__.__name__]
for attr in self.showAttributes:
if isinstance(attr, str):
r.append(' %s=%r' % (attr, getattr(self, attr)))
elif len(attr) == 2:
r.append((' %s=' % (attr[0],)) + attr[1](getattr(self, attr[0])))
else:
r.append((' %s=' + attr[2]) % (attr[1], getattr(self, attr[0])))
r.append('>')
return ''.join(r)
__repr__ = __str__
class FancyEqMixin:
"""
Mixin that implements C{__eq__} and C{__ne__}.
Comparison is done using the list of attributes defined in
C{compareAttributes}.
"""
compareAttributes = ()
def __eq__(self, other):
if not self.compareAttributes:
return self is other
if isinstance(self, other.__class__):
return (
[getattr(self, name) for name in self.compareAttributes] ==
[getattr(other, name) for name in self.compareAttributes])
return NotImplemented
def __ne__(self, other):
result = self.__eq__(other)
if result is NotImplemented:
return result
return not result
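# Illustrative usage sketch (editor's addition): a small class combining both
# mixins; __str__ is driven by showAttributes and equality by compareAttributes.
class _DemoPoint(FancyStrMixin, FancyEqMixin):
    showAttributes = ('x', ('y', 'why', '%.1f'))
    compareAttributes = ('x', 'y')
    def __init__(self, x, y):
        self.x, self.y = x, y
# str(_DemoPoint(1, 2.0)) -> '<_DemoPoint x=1 why=2.0>'
# _DemoPoint(1, 2.0) == _DemoPoint(1, 2.0) -> True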
try:
# Python 2.7 / Python 3.3
from os import initgroups as _c_initgroups
except ImportError:
try:
# Python 2.6
from twisted.python._initgroups import initgroups as _c_initgroups
except ImportError:
_c_initgroups = None
if pwd is None or grp is None or setgroups is None or getgroups is None:
def initgroups(uid, primaryGid):
"""
Do nothing.
The underlying platform support required to manipulate groups is missing.
"""
else:
# Fallback to the inefficient Python version
def _setgroups_until_success(l):
while(1):
# NASTY NASTY HACK (but glibc does it so it must be okay):
# In case sysconfig didn't give the right answer, find the limit
# on max groups by just looping, trying to set fewer and fewer
# groups each time until it succeeds.
try:
setgroups(l)
except ValueError:
# This exception comes from python itself restricting
# number of groups allowed.
if len(l) > 1:
del l[-1]
else:
raise
except OSError as e:
if e.errno == errno.EINVAL and len(l) > 1:
# This comes from the OS saying too many groups
del l[-1]
else:
raise
else:
# Success, yay!
return
def initgroups(uid, primaryGid):
"""
Initializes the group access list.
If the C extension is present, we're calling it, which in turn calls
initgroups(3).
If not, this is done by reading the group database /etc/group and using
all groups of which C{uid} is a member. The additional group
C{primaryGid} is also added to the list.
If the given user is a member of more than C{NGROUPS}, arbitrary
groups will be silently discarded to bring the number below that
limit.
@type uid: C{int}
@param uid: The UID for which to look up group information.
@type primaryGid: C{int} or C{NoneType}
@param primaryGid: If provided, an additional GID to include when
setting the groups.
"""
if _c_initgroups is not None:
return _c_initgroups(pwd.getpwuid(uid)[0], primaryGid)
try:
# Try to get the maximum number of groups
max_groups = os.sysconf("SC_NGROUPS_MAX")
except:
# No predefined limit
max_groups = 0
username = pwd.getpwuid(uid)[0]
l = []
if primaryGid is not None:
l.append(primaryGid)
for groupname, password, gid, userlist in grp.getgrall():
if username in userlist:
l.append(gid)
if len(l) == max_groups:
break # No more groups, ignore any more
try:
_setgroups_until_success(l)
except OSError as e:
# We might be able to remove this code now that we
# don't try to setgid/setuid even when not asked to.
if e.errno == errno.EPERM:
for g in getgroups():
if g not in l:
raise
else:
raise
def switchUID(uid, gid, euid=False):
"""
Attempts to switch the uid/euid and gid/egid for the current process.
If C{uid} is the same value as L{os.getuid} (or L{os.geteuid}),
this function will issue a L{UserWarning} and not raise an exception.
@type uid: C{int} or C{NoneType}
@param uid: the UID (or EUID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type gid: C{int} or C{NoneType}
@param gid: the GID (or EGID) to switch the current process to. This
parameter will be ignored if the value is C{None}.
@type euid: C{bool}
@param euid: if True, set only effective user-id rather than real user-id.
(This option has no effect unless the process is running
as root, in which case it means not to shed all
privileges, retaining the option to regain privileges
in cases such as spawning processes. Use with caution.)
"""
if euid:
setuid = os.seteuid
setgid = os.setegid
getuid = os.geteuid
else:
setuid = os.setuid
setgid = os.setgid
getuid = os.getuid
if gid is not None:
setgid(gid)
if uid is not None:
if uid == getuid():
uidText = (euid and "euid" or "uid")
actionText = "tried to drop privileges and set%s %s" % (uidText, uid)
problemText = "%s is already %s" % (uidText, getuid())
warnings.warn("%s but %s; should we be root? Continuing."
% (actionText, problemText))
else:
initgroups(uid, gid)
setuid(uid)
class SubclassableCStringIO(object):
"""
A wrapper around cStringIO to allow for subclassing.
"""
__csio = None
def __init__(self, *a, **kw):
from cStringIO import StringIO
self.__csio = StringIO(*a, **kw)
def __iter__(self):
return self.__csio.__iter__()
def next(self):
return self.__csio.next()
def close(self):
return self.__csio.close()
def isatty(self):
return self.__csio.isatty()
def seek(self, pos, mode=0):
return self.__csio.seek(pos, mode)
def tell(self):
return self.__csio.tell()
def read(self, n=-1):
return self.__csio.read(n)
def readline(self, length=None):
return self.__csio.readline(length)
def readlines(self, sizehint=0):
return self.__csio.readlines(sizehint)
def truncate(self, size=None):
return self.__csio.truncate(size)
def write(self, s):
return self.__csio.write(s)
def writelines(self, list):
return self.__csio.writelines(list)
def flush(self):
return self.__csio.flush()
def getvalue(self):
return self.__csio.getvalue()
def untilConcludes(f, *a, **kw):
"""
Call C{f} with the given arguments, handling C{EINTR} by retrying.
@param f: A function to call.
@param *a: Positional arguments to pass to C{f}.
@param **kw: Keyword arguments to pass to C{f}.
@return: Whatever C{f} returns.
@raise: Whatever C{f} raises, except for C{IOError} or C{OSError} with
C{errno} set to C{EINTR}.
"""
while True:
try:
return f(*a, **kw)
except (IOError, OSError) as e:
if e.args[0] == errno.EINTR:
continue
raise
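# Illustrative usage sketch (editor's addition): retry an os.read() that may be
# interrupted by a signal instead of letting the EINTR error propagate.
def _demo_untilConcludes(fd, nbytes=4096):
    return untilConcludes(os.read, fd, nbytes)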
def mergeFunctionMetadata(f, g):
"""
Overwrite C{g}'s name and docstring with values from C{f}. Update
C{g}'s instance dictionary with C{f}'s.
@return: A function that has C{g}'s behavior and metadata merged from
C{f}.
"""
try:
g.__name__ = f.__name__
except TypeError:
pass
try:
g.__doc__ = f.__doc__
except (TypeError, AttributeError):
pass
try:
g.__dict__.update(f.__dict__)
except (TypeError, AttributeError):
pass
try:
g.__module__ = f.__module__
except TypeError:
pass
return g
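# Illustrative usage sketch (editor's addition): a wrapper picks up the wrapped
# function's name and docstring, much like functools.wraps.
def _demo_mergeFunctionMetadata():
    def original():
        """Original docstring."""
    def wrapper():
        return original()
    wrapper = mergeFunctionMetadata(original, wrapper)
    assert wrapper.__name__ == 'original'
    assert wrapper.__doc__ == 'Original docstring.'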
def nameToLabel(mname):
"""
Convert a string like a variable name into a slightly more human-friendly
string with spaces and capitalized letters.
@type mname: C{str}
@param mname: The name to convert to a label. This must be a string
which could be used as a Python identifier. Strings which do not take
this form will result in unpredictable behavior.
@rtype: C{str}
"""
labelList = []
word = ''
lastWasUpper = False
for letter in mname:
if letter.isupper() == lastWasUpper:
# Continuing a word.
word += letter
else:
# breaking a word OR beginning a word
if lastWasUpper:
# could be either
if len(word) == 1:
# keep going
word += letter
else:
# acronym
# we're processing the lowercase letter after the acronym-then-capital
lastWord = word[:-1]
firstLetter = word[-1]
labelList.append(lastWord)
word = firstLetter + letter
else:
# definitely breaking: lower to upper
labelList.append(word)
word = letter
lastWasUpper = letter.isupper()
if labelList:
labelList[0] = labelList[0].capitalize()
else:
return mname.capitalize()
labelList.append(word)
return ' '.join(labelList)
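# Illustrative usage sketch (editor's addition): typical conversions.
def _demo_nameToLabel():
    assert nameToLabel('aSillyName') == 'A Silly Name'
    assert nameToLabel('hello') == 'Hello'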
def uidFromString(uidString):
"""
Convert a user identifier, as a string, into an integer UID.
@type uidString: C{str}
@param uidString: A string giving the base-ten representation of a UID or the
name of a user which can be converted to a UID via L{pwd.getpwnam}.
@rtype: C{int}
@return: The integer UID corresponding to the given string.
@raise ValueError: If the user name is supplied and L{pwd} is not
available.
"""
try:
return int(uidString)
except ValueError:
if pwd is None:
raise
return pwd.getpwnam(uidString)[2]
def gidFromString(gidString):
"""
Convert a group identifier, as a string, into an integer GID.
@type gidString: C{str}
@param gidString: A string giving the base-ten representation of a GID or the
name of a group which can be converted to a GID via L{grp.getgrnam}.
@rtype: C{int}
@return: The integer GID corresponding to the given string.
@raise ValueError: If the group name is supplied and L{grp} is not
available.
"""
try:
return int(gidString)
except ValueError:
if grp is None:
raise
return grp.getgrnam(gidString)[2]
def runAsEffectiveUser(euid, egid, function, *args, **kwargs):
"""
Run the given function wrapped with seteuid/setegid calls.
This will try to minimize the number of seteuid/setegid calls, comparing
current and wanted permissions
@param euid: effective UID used to call the function.
@type euid: C{int}
@type egid: C{int}
@param egid: effective GID used to call the function.
@param function: the function run with the specific permission.
@type function: any callable
@param *args: arguments passed to C{function}
@param **kwargs: keyword arguments passed to C{function}
"""
uid, gid = os.geteuid(), os.getegid()
if uid == euid and gid == egid:
return function(*args, **kwargs)
else:
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(egid)
if euid != 0 and (euid != uid or gid != egid):
os.seteuid(euid)
try:
return function(*args, **kwargs)
finally:
if euid != 0 and (uid != euid or gid != egid):
os.seteuid(0)
if gid != egid:
os.setegid(gid)
if uid != 0 and (uid != euid or gid != egid):
os.seteuid(uid)
def runWithWarningsSuppressed(suppressedWarnings, f, *args, **kwargs):
"""
Run C{f(*args, **kwargs)}, but with some warnings suppressed.
Unlike L{twisted.internet.utils.runWithWarningsSuppressed}, it has no
special support for L{twisted.internet.defer.Deferred}.
@param suppressedWarnings: A list of arguments to pass to filterwarnings.
Must be a sequence of 2-tuples (args, kwargs).
@param f: A callable.
@param args: Arguments for C{f}.
@param kwargs: Keyword arguments for C{f}
@return: The result of C{f(*args, **kwargs)}.
"""
with warnings.catch_warnings():
for a, kw in suppressedWarnings:
warnings.filterwarnings(*a, **kw)
return f(*args, **kwargs)
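# Illustrative usage sketch (editor's addition): silence DeprecationWarning
# while calling a function.
def _demo_runWithWarningsSuppressed(f, *args):
    suppress = [(('ignore',), {'category': DeprecationWarning})]
    return runWithWarningsSuppressed(suppress, f, *args)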
__all__ = [
"uniquify", "padTo", "getPluginDirs", "addPluginDir", "sibpath",
"getPassword", "println", "makeStatBar", "OrderedDict",
"InsensitiveDict", "spewer", "searchupwards", "LineLog",
"raises", "IntervalDifferential", "FancyStrMixin", "FancyEqMixin",
"switchUID", "SubclassableCStringIO", "mergeFunctionMetadata",
"nameToLabel", "uidFromString", "gidFromString", "runAsEffectiveUser",
"untilConcludes", "runWithWarningsSuppressed",
]
if _PY3:
__notported__ = ["SubclassableCStringIO", "LineLog", "makeStatBar"]
for name in __all__[:]:
if name in __notported__:
__all__.remove(name)
del globals()[name]
del name, __notported__
| mit |
mfjb/scikit-learn | sklearn/covariance/tests/test_robust_covariance.py | 213 | 3359 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.validation import NotFittedError
from sklearn import datasets
from sklearn.covariance import empirical_covariance, MinCovDet, \
EllipticEnvelope
X = datasets.load_iris().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_mcd():
# Tests the FastMCD algorithm implementation
# Small data set
# test without outliers (random independent normal data)
launch_mcd_on_dataset(100, 5, 0, 0.01, 0.1, 80)
# test with a contaminated data set (medium contamination)
launch_mcd_on_dataset(100, 5, 20, 0.01, 0.01, 70)
# test with a contaminated data set (strong contamination)
launch_mcd_on_dataset(100, 5, 40, 0.1, 0.1, 50)
# Medium data set
launch_mcd_on_dataset(1000, 5, 450, 0.1, 0.1, 540)
# Large data set
launch_mcd_on_dataset(1700, 5, 800, 0.1, 0.1, 870)
# 1D data set
launch_mcd_on_dataset(500, 1, 100, 0.001, 0.001, 350)
def launch_mcd_on_dataset(n_samples, n_features, n_outliers, tol_loc, tol_cov,
tol_support):
rand_gen = np.random.RandomState(0)
data = rand_gen.randn(n_samples, n_features)
# add some outliers
outliers_index = rand_gen.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(rand_gen.randint(2, size=(n_outliers, n_features)) - 0.5)
data[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
pure_data = data[inliers_mask]
# compute MCD by fitting an object
mcd_fit = MinCovDet(random_state=rand_gen).fit(data)
T = mcd_fit.location_
S = mcd_fit.covariance_
H = mcd_fit.support_
# compare with the estimates learnt from the inliers
error_location = np.mean((pure_data.mean(0) - T) ** 2)
assert(error_location < tol_loc)
error_cov = np.mean((empirical_covariance(pure_data) - S) ** 2)
assert(error_cov < tol_cov)
assert(np.sum(H) >= tol_support)
assert_array_almost_equal(mcd_fit.mahalanobis(data), mcd_fit.dist_)
def test_mcd_issue1127():
# Check that the code does not break with X.shape = (3, 1)
# (i.e. n_support = n_samples)
rnd = np.random.RandomState(0)
X = rnd.normal(size=(3, 1))
mcd = MinCovDet()
mcd.fit(X)
def test_outlier_detection():
rnd = np.random.RandomState(0)
X = rnd.randn(100, 10)
clf = EllipticEnvelope(contamination=0.1)
assert_raises(NotFittedError, clf.predict, X)
assert_raises(NotFittedError, clf.decision_function, X)
clf.fit(X)
y_pred = clf.predict(X)
decision = clf.decision_function(X, raw_values=True)
decision_transformed = clf.decision_function(X, raw_values=False)
assert_array_almost_equal(
decision, clf.mahalanobis(X))
assert_array_almost_equal(clf.mahalanobis(X), clf.dist_)
assert_almost_equal(clf.score(X, np.ones(100)),
(100 - y_pred[y_pred == -1].size) / 100.)
assert(sum(y_pred == -1) == sum(decision_transformed < 0))
| bsd-3-clause |
andyh616/mne-python | examples/preprocessing/plot_run_ica.py | 13 | 1519 | # doc:slow-example
"""
================================
Compute ICA components on epochs
================================
ICA is fit to MEG raw data.
We assume that the non-stationary EOG artifacts have already been removed.
The sources matching the ECG are automatically found and displayed.
Subsequently, artefact detection and rejection quality are assessed.
Finally, the impact on the evoked ERF is visualized.
Note that this example does quite a bit of processing, so even on a
fast machine it can take about a minute to complete.
"""
# Authors: Denis Engemann <[email protected]>
#
# License: BSD (3-clause)
import mne
from mne.io import Raw
from mne.preprocessing import ICA, create_ecg_epochs
from mne.datasets import sample
print(__doc__)
###############################################################################
# Fit ICA model using the FastICA algorithm, detect and inspect components
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = Raw(raw_fname, preload=True)
raw.filter(1, 30, method='iir')
raw.pick_types(meg=True, eeg=False, exclude='bads', stim=True)
# longer + more epochs for more artifact exposure
events = mne.find_events(raw, stim_channel='STI 014')
epochs = mne.Epochs(raw, events, event_id=None, tmin=-0.2, tmax=0.5)
ica = ICA(n_components=0.95, method='fastica').fit(epochs)
ecg_epochs = create_ecg_epochs(raw, tmin=-.5, tmax=.5)
ecg_inds, scores = ica.find_bads_ecg(ecg_epochs)
ica.plot_components(ecg_inds)
| bsd-3-clause |
codeaudit/fuel | fuel/schemes.py | 21 | 10120 | from abc import ABCMeta, abstractmethod
from collections import Iterable
import numpy
from picklable_itertools import chain, repeat, imap, iter_
from picklable_itertools.extras import partition_all
from six import add_metaclass
from six.moves import xrange
from fuel import config
@add_metaclass(ABCMeta)
class IterationScheme(object):
"""An iteration scheme.
Iteration schemes provide a dataset-agnostic iteration scheme, such as
sequential batches, shuffled batches, etc. for datasets that choose to
support them.
Attributes
----------
requests_examples : bool
Whether requests produced by this scheme correspond to single
examples (as opposed to batches).
Notes
-----
Iteration schemes implement the :meth:`get_request_iterator` method,
which returns an iterator type (e.g. a generator or a class which
implements the `iterator protocol`_).
Stochastic iteration schemes should generally not be shared between
different data streams, because it would make experiments harder to
reproduce.
.. _iterator protocol:
https://docs.python.org/3.3/library/stdtypes.html#iterator-types
"""
@abstractmethod
def get_request_iterator(self):
"""Returns an iterator type."""
@add_metaclass(ABCMeta)
class BatchSizeScheme(IterationScheme):
"""Iteration scheme that returns batch sizes.
For infinite datasets it doesn't make sense to provide indices to
examples, but the number of samples per batch can still be given.
Hence BatchSizeScheme is the base class for iteration schemes
that only provide the number of examples that should be in a batch.
"""
requests_examples = False
@add_metaclass(ABCMeta)
class BatchScheme(IterationScheme):
"""Iteration schemes that return slices or indices for batches.
For datasets where the number of examples is known and easily
accessible (as is the case for most datasets which are small enough
to be kept in memory, like MNIST) we can provide slices or lists of
labels to the dataset.
Parameters
----------
examples : int or list
Defines which examples from the dataset are iterated.
If list, its items are the indices of examples.
If an integer, it will use that many examples from the beginning
of the dataset, i.e. it is interpreted as range(examples)
batch_size : int
The request iterator will return slices or list of indices in
batches of size `batch_size` until the end of `examples` is
reached.
Note that this means that the last batch size returned could be
smaller than `batch_size`. If you want to ensure all batches are
of equal size, then ensure len(`examples`) or `examples` is a
multiple of `batch_size`.
"""
requests_examples = False
def __init__(self, examples, batch_size):
if isinstance(examples, Iterable):
self.indices = examples
else:
self.indices = xrange(examples)
self.batch_size = batch_size
class ConcatenatedScheme(IterationScheme):
"""Build an iterator by concatenating several schemes' iterators.
Useful for iterating through different subsets of data in a specific
order.
Parameters
----------
schemes : list
A list of :class:`IterationSchemes`, whose request iterators
are to be concatenated in the order given.
Notes
-----
All schemes being concatenated must produce the same type of
requests (batches or examples).
"""
def __init__(self, schemes):
if not len(set(scheme.requests_examples for scheme in schemes)) == 1:
raise ValueError('all schemes must produce the same type of '
'requests (batches or examples)')
self.schemes = schemes
def get_request_iterator(self):
return chain(*[sch.get_request_iterator() for sch in self.schemes])
@property
def requests_examples(self):
return self.schemes[0].requests_examples
@add_metaclass(ABCMeta)
class IndexScheme(IterationScheme):
"""Iteration schemes that return single indices.
This is for datasets that support indexing (like :class:`BatchScheme`)
but where we want to return single examples instead of batches.
"""
requests_examples = True
def __init__(self, examples):
if isinstance(examples, Iterable):
self.indices = examples
else:
self.indices = xrange(examples)
class ConstantScheme(BatchSizeScheme):
"""Constant batch size iterator.
This subset iterator simply returns the same constant batch size
for a given number of times (or else infinitely).
Parameters
----------
batch_size : int
The size of the batch to return.
num_examples : int, optional
If given, the request iterator will return `batch_size` until the
sum reaches `num_examples`. Note that this means that the last
batch size returned could be smaller than `batch_size`. If you want
to ensure all batches are of equal size, then pass `times` equal to
``num_examples / batch-size`` instead.
times : int, optional
The number of times to return `batch_size`.
"""
def __init__(self, batch_size, num_examples=None, times=None):
if num_examples and times:
raise ValueError
self.batch_size = batch_size
self.num_examples = num_examples
self.times = times
def get_request_iterator(self):
if self.times:
return repeat(self.batch_size, self.times)
if self.num_examples:
d, r = divmod(self.num_examples, self.batch_size)
return chain(repeat(self.batch_size, d), [r] if r else [])
return repeat(self.batch_size)
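# Illustrative usage sketch (editor's addition): 10 examples in batches of 3
# yield the batch sizes 3, 3, 3, 1.
def _demo_constant_scheme():
    return list(ConstantScheme(3, num_examples=10).get_request_iterator())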
class SequentialScheme(BatchScheme):
"""Sequential batches iterator.
Iterate over all the examples in a dataset of fixed size sequentially
in batches of a given size.
Notes
-----
The batch size isn't enforced, so the last batch could be smaller.
"""
def get_request_iterator(self):
return imap(list, partition_all(self.batch_size, self.indices))
class ShuffledScheme(BatchScheme):
"""Shuffled batches iterator.
Iterate over all the examples in a dataset of fixed size in shuffled
batches.
Parameters
----------
sorted_indices : bool, optional
If `True`, enforce that indices within a batch are ordered.
Defaults to `False`.
Notes
-----
The batch size isn't enforced, so the last batch could be smaller.
Shuffling the batches requires creating a shuffled list of indices in
memory. This can be memory-intensive for very large numbers of examples
(i.e. in the order of tens of millions).
"""
def __init__(self, *args, **kwargs):
self.rng = kwargs.pop('rng', None)
if self.rng is None:
self.rng = numpy.random.RandomState(config.default_seed)
self.sorted_indices = kwargs.pop('sorted_indices', False)
super(ShuffledScheme, self).__init__(*args, **kwargs)
def get_request_iterator(self):
indices = list(self.indices)
self.rng.shuffle(indices)
if self.sorted_indices:
return imap(sorted, partition_all(self.batch_size, indices))
else:
return imap(list, partition_all(self.batch_size, indices))
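# Illustrative usage sketch (editor's addition): batch requests over 5 examples
# with batch_size=2; the last batch is smaller.
def _demo_batch_schemes():
    sequential = list(SequentialScheme(5, 2).get_request_iterator())
    # -> [[0, 1], [2, 3], [4]]
    shuffled = list(ShuffledScheme(5, 2).get_request_iterator())
    # -> the same indices in random batches, e.g. [[2, 0], [4, 1], [3]]
    return sequential, shuffled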
class SequentialExampleScheme(IndexScheme):
"""Sequential examples iterator.
Returns examples in order.
"""
def get_request_iterator(self):
return iter_(self.indices)
class ShuffledExampleScheme(IndexScheme):
"""Shuffled examples iterator.
Returns examples in random order.
"""
def __init__(self, *args, **kwargs):
self.rng = kwargs.pop('rng', None)
if self.rng is None:
self.rng = numpy.random.RandomState(config.default_seed)
super(ShuffledExampleScheme, self).__init__(*args, **kwargs)
def get_request_iterator(self):
indices = list(self.indices)
self.rng.shuffle(indices)
return iter_(indices)
def cross_validation(scheme_class, num_examples, num_folds, strict=True,
**kwargs):
"""Return pairs of schemes to be used for cross-validation.
Parameters
----------
scheme_class : subclass of :class:`IndexScheme` or :class:`BatchScheme`
The type of the returned schemes. The constructor is called with an
iterator and `**kwargs` as arguments.
num_examples : int
The number of examples in the datastream.
num_folds : int
The number of folds to return.
strict : bool, optional
If `True`, enforce that `num_examples` is divisible by `num_folds`
and so, that all validation sets have the same size. If `False`,
the size of the validation set is returned along the iteration
schemes. Defaults to `True`.
Yields
------
fold : tuple
The generator returns `num_folds` tuples. The first two elements of
the tuple are the training and validation iteration schemes. If
`strict` is set to `False`, the tuple has a third element
corresponding to the size of the validation set.
"""
if strict and num_examples % num_folds != 0:
raise ValueError(("{} examples are not divisible in {} evenly-sized " +
"folds. To allow this, have a look at the " +
"`strict` argument.").format(num_examples,
num_folds))
for i in xrange(num_folds):
begin = num_examples * i // num_folds
end = num_examples * (i+1) // num_folds
train = scheme_class(list(chain(xrange(0, begin),
xrange(end, num_examples))),
**kwargs)
valid = scheme_class(xrange(begin, end), **kwargs)
if strict:
yield (train, valid)
else:
yield (train, valid, end - begin)
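# Illustrative usage sketch (editor's addition): three folds over 9 examples,
# each fold a (train, valid) pair of SequentialSchemes with batch_size=2.
def _demo_cross_validation():
    folds = list(cross_validation(SequentialScheme, 9, 3, batch_size=2))
    train, valid = folds[0]
    # list(valid.get_request_iterator()) -> [[0, 1], [2]]
    # list(train.get_request_iterator()) -> [[3, 4], [5, 6], [7, 8]]
    return folds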
| mit |
vitaly-krugl/nupic | src/nupic/swarming/hypersearch/object_json.py | 49 | 5091 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""JSON encoding and decoding."""
# Pylint gets confused about return types from deserialization.
# pylint: disable=E1103
import json
import sys
NON_OBJECT_TYPES = (type(None), bool, int, float, long, str, unicode)
class Types(object):
TUPLE = 'py/tuple'
SET = 'py/set'
DATETIME = 'datetime/datetime.datetime'
REPR = 'py/repr'
OBJECT = 'py/object'
KEYS = 'py/dict/keys'
def getImportPath(obj):
cls = obj.__class__
return '%s.%s' % (cls.__module__, cls.__name__)
def convertDict(obj):
obj = dict(obj)
for k, v in obj.items():
del obj[k]
if not (isinstance(k, str) or isinstance(k, unicode)):
k = dumps(k)
# Keep track of which keys need to be decoded when loading.
if Types.KEYS not in obj:
obj[Types.KEYS] = []
obj[Types.KEYS].append(k)
obj[k] = convertObjects(v)
return obj
def restoreKeysPostDecoding(obj):
if isinstance(obj, dict):
if Types.KEYS in obj:
for k in obj[Types.KEYS]:
v = obj[k]
del obj[k]
newKey = loads(k)
obj[newKey] = v
del obj[Types.KEYS]
for k, v in obj.items():
if isinstance(v, dict):
obj[k] = restoreKeysPostDecoding(v)
elif isinstance(obj, list):
obj = [restoreKeysPostDecoding(item) for item in obj]
elif isinstance(obj, set):
obj = set([restoreKeysPostDecoding(item) for item in obj])
elif isinstance(obj, tuple):
obj = tuple([restoreKeysPostDecoding(item) for item in obj])
return obj
def convertObjects(obj):
if type(obj) in NON_OBJECT_TYPES:
return obj
elif isinstance(obj, list):
return [convertObjects(item) for item in obj]
elif isinstance(obj, dict):
return convertDict(obj)
elif isinstance(obj, tuple):
return {Types.TUPLE: [convertObjects(item) for item in obj]}
elif isinstance(obj, set):
return {Types.SET: [convertObjects(item) for item in obj]}
else:
if hasattr(obj, '__getstate__'):
state = obj.__getstate__()
elif hasattr(obj, '__slots__'):
values = map(lambda x: getattr(obj, x), obj.__slots__)
state = dict(zip(obj.__slots__, values))
elif hasattr(obj, '__dict__'):
state = obj.__dict__
else:
if not hasattr(obj, '__class__'):
raise TypeError('Cannot encode object: %s' % repr(obj))
state = {Types.REPR: repr(obj)}
state[Types.OBJECT] = getImportPath(obj)
return convertObjects(state)
def objectDecoderHook(obj):
obj = restoreKeysPostDecoding(obj)
if isinstance(obj, dict):
if Types.TUPLE in obj:
return tuple(obj[Types.TUPLE])
elif Types.SET in obj:
return set(obj[Types.SET])
elif Types.DATETIME in obj:
return eval(obj[Types.DATETIME])
elif Types.REPR in obj:
module, name = obj[Types.OBJECT].rsplit('.', 1)
return eval(obj[Types.REPR], {module: __import__(module)})
elif Types.OBJECT in obj:
module, name = obj[Types.OBJECT].rsplit('.', 1)
__import__(module)
cls = getattr(sys.modules[module], name)
try:
if hasattr(cls, '__new__'):
instance = cls.__new__(cls)
else:
instance = object.__new__(cls)
except TypeError:
try:
instance = cls()
except TypeError:
raise TypeError('Old style class cannot be instantiated: %s' %
obj[Types.OBJECT])
attrs = obj
del attrs[Types.OBJECT]
if hasattr(instance, '__setstate__'):
instance.__setstate__(attrs)
else:
for k, v in attrs.iteritems():
setattr(instance, k, v)
return instance
return obj
def clean(s):
"""Removes trailing whitespace on each line."""
lines = [l.rstrip() for l in s.split('\n')]
return '\n'.join(lines)
def dumps(obj, **kwargs):
return clean(json.dumps(convertObjects(obj), **kwargs))
def dump(obj, f, **kwargs):
f.write(dumps(obj, **kwargs))
def loads(s, **kwargs):
return restoreKeysPostDecoding(
json.loads(s, object_hook=objectDecoderHook, **kwargs))
def load(f, **kwargs):
return restoreKeysPostDecoding(
json.load(f, object_hook=objectDecoderHook, **kwargs))
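# Illustrative usage sketch (editor's addition): tuples, sets and non-string
# dictionary keys survive a dumps()/loads() round trip.
def _demo_roundtrip():
    original = {'point': (1, 2), 'tags': set(['a', 'b']), 3: 'three'}
    assert loads(dumps(original)) == original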
| agpl-3.0 |
zarboz/Evita-Jellybean | Documentation/target/tcm_mod_builder.py | 4981 | 41422 | #!/usr/bin/python
# The TCM v4 multi-protocol fabric module generation script for drivers/target/$NEW_MOD
#
# Copyright (c) 2010 Rising Tide Systems
# Copyright (c) 2010 Linux-iSCSI.org
#
# Author: [email protected]
#
import os, sys
import subprocess as sub
import string
import re
import optparse
tcm_dir = ""
fabric_ops = []
fabric_mod_dir = ""
fabric_mod_port = ""
fabric_mod_init_port = ""
def tcm_mod_err(msg):
print msg
sys.exit(1)
def tcm_mod_create_module_subdir(fabric_mod_dir_var):
if os.path.isdir(fabric_mod_dir_var) == True:
return 1
print "Creating fabric_mod_dir: " + fabric_mod_dir_var
ret = os.mkdir(fabric_mod_dir_var)
if ret:
tcm_mod_err("Unable to mkdir " + fabric_mod_dir_var)
return
def tcm_mod_build_FC_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for FC Initiator Nport */\n"
buf += " u64 nport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Initiator Nport */\n"
buf += " char nport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* FC lport target portal group tag for TCM */\n"
buf += " u16 lport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_lport */\n"
buf += " struct " + fabric_mod_name + "_lport *lport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_lport {\n"
buf += " /* SCSI protocol the lport is providing */\n"
buf += " u8 lport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for FC Target Lport */\n"
buf += " u64 lport_wwpn;\n"
buf += " /* ASCII formatted WWPN for FC Target Lport */\n"
buf += " char lport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_lport() */\n"
buf += " struct se_wwn lport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "lport"
fabric_mod_init_port = "nport"
return
def tcm_mod_build_SAS_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* Binary World Wide unique Port Name for SAS Initiator port */\n"
buf += " u64 iport_wwpn;\n"
buf += " /* ASCII formatted WWPN for Sas Initiator port */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* SAS port target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* Binary World Wide unique Port Name for SAS Target port */\n"
buf += " u64 tport_wwpn;\n"
buf += " /* ASCII formatted WWPN for SAS Target port */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_iSCSI_include(fabric_mod_dir_var, fabric_mod_name):
global fabric_mod_port
global fabric_mod_init_port
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_base.h"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#define " + fabric_mod_name.upper() + "_VERSION \"v0.1\"\n"
buf += "#define " + fabric_mod_name.upper() + "_NAMELEN 32\n"
buf += "\n"
buf += "struct " + fabric_mod_name + "_nacl {\n"
buf += " /* ASCII formatted InitiatorName */\n"
buf += " char iport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_nodeacl() */\n"
buf += " struct se_node_acl se_node_acl;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tpg {\n"
buf += " /* iSCSI target portal group tag for TCM */\n"
buf += " u16 tport_tpgt;\n"
buf += " /* Pointer back to " + fabric_mod_name + "_tport */\n"
buf += " struct " + fabric_mod_name + "_tport *tport;\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tpg() */\n"
buf += " struct se_portal_group se_tpg;\n"
buf += "};\n\n"
buf += "struct " + fabric_mod_name + "_tport {\n"
buf += " /* SCSI protocol the tport is providing */\n"
buf += " u8 tport_proto_id;\n"
buf += " /* ASCII formatted TargetName for IQN */\n"
buf += " char tport_name[" + fabric_mod_name.upper() + "_NAMELEN];\n"
buf += " /* Returned by " + fabric_mod_name + "_make_tport() */\n"
buf += " struct se_wwn tport_wwn;\n"
buf += "};\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
fabric_mod_port = "tport"
fabric_mod_init_port = "iport"
return
def tcm_mod_build_base_includes(proto_ident, fabric_mod_dir_val, fabric_mod_name):
if proto_ident == "FC":
tcm_mod_build_FC_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "SAS":
tcm_mod_build_SAS_include(fabric_mod_dir_val, fabric_mod_name)
elif proto_ident == "iSCSI":
tcm_mod_build_iSCSI_include(fabric_mod_dir_val, fabric_mod_name)
else:
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
return
def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_configfs.c"
print "Writing file: " + f
p = open(f, 'w');
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "#include <linux/module.h>\n"
buf += "#include <linux/moduleparam.h>\n"
buf += "#include <linux/version.h>\n"
buf += "#include <generated/utsrelease.h>\n"
buf += "#include <linux/utsname.h>\n"
buf += "#include <linux/init.h>\n"
buf += "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/configfs.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_fabric_configfs.h>\n"
buf += "#include <target/target_core_configfs.h>\n"
buf += "#include <target/configfs_macros.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "/* Local pointer to allocated TCM configfs fabric module */\n"
buf += "struct target_fabric_configfs *" + fabric_mod_name + "_fabric_configfs;\n\n"
buf += "static struct se_node_acl *" + fabric_mod_name + "_make_nodeacl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct se_node_acl *se_nacl, *se_nacl_new;\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n"
buf += " u32 nexus_depth;\n\n"
buf += " /* " + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n"
buf += " se_nacl_new = " + fabric_mod_name + "_alloc_fabric_acl(se_tpg);\n"
buf += " if (!se_nacl_new)\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += "//#warning FIXME: Hardcoded nexus depth in " + fabric_mod_name + "_make_nodeacl()\n"
buf += " nexus_depth = 1;\n"
buf += " /*\n"
buf += " * se_nacl_new may be released by core_tpg_add_initiator_node_acl()\n"
buf += " * when converting a NodeACL from demo mode -> explict\n"
buf += " */\n"
buf += " se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,\n"
buf += " name, nexus_depth);\n"
buf += " if (IS_ERR(se_nacl)) {\n"
buf += " " + fabric_mod_name + "_release_fabric_acl(se_tpg, se_nacl_new);\n"
buf += " return se_nacl;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Locate our struct " + fabric_mod_name + "_nacl and set the FC Nport WWPN\n"
buf += " */\n"
buf += " nacl = container_of(se_nacl, struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " nacl->" + fabric_mod_init_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&nacl->" + fabric_mod_init_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return se_nacl;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_nodeacl(struct se_node_acl *se_acl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_acl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " core_tpg_del_initiator_node_acl(se_acl->se_tpg, se_acl, 1);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
buf += "static struct se_portal_group *" + fabric_mod_name + "_make_tpg(\n"
buf += " struct se_wwn *wwn,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + "*" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg;\n"
buf += " unsigned long tpgt;\n"
buf += " int ret;\n\n"
buf += " if (strstr(name, \"tpgt_\") != name)\n"
buf += " return ERR_PTR(-EINVAL);\n"
buf += " if (strict_strtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)\n"
buf += " return ERR_PTR(-EINVAL);\n\n"
buf += " tpg = kzalloc(sizeof(struct " + fabric_mod_name + "_tpg), GFP_KERNEL);\n"
buf += " if (!tpg) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_tpg\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
buf += " ret = core_tpg_register(&" + fabric_mod_name + "_fabric_configfs->tf_ops, wwn,\n"
buf += " &tpg->se_tpg, (void *)tpg,\n"
buf += " TRANSPORT_TPG_TYPE_NORMAL);\n"
buf += " if (ret < 0) {\n"
buf += " kfree(tpg);\n"
buf += " return NULL;\n"
buf += " }\n"
buf += " return &tpg->se_tpg;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_tpg(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n\n"
buf += " core_tpg_deregister(se_tpg);\n"
buf += " kfree(tpg);\n"
buf += "}\n\n"
buf += "static struct se_wwn *" + fabric_mod_name + "_make_" + fabric_mod_port + "(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " struct config_group *group,\n"
buf += " const char *name)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + ";\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " u64 wwpn = 0;\n\n"
buf += " /* if (" + fabric_mod_name + "_parse_wwn(name, &wwpn, 1) < 0)\n"
buf += " return ERR_PTR(-EINVAL); */\n\n"
buf += " " + fabric_mod_port + " = kzalloc(sizeof(struct " + fabric_mod_name + "_" + fabric_mod_port + "), GFP_KERNEL);\n"
buf += " if (!" + fabric_mod_port + ") {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_" + fabric_mod_port + "\");\n"
buf += " return ERR_PTR(-ENOMEM);\n"
buf += " }\n"
if proto_ident == "FC" or proto_ident == "SAS":
buf += " " + fabric_mod_port + "->" + fabric_mod_port + "_wwpn = wwpn;\n"
buf += " /* " + fabric_mod_name + "_format_wwn(&" + fabric_mod_port + "->" + fabric_mod_port + "_name[0], " + fabric_mod_name.upper() + "_NAMELEN, wwpn); */\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_wwn;\n"
buf += "}\n\n"
buf += "static void " + fabric_mod_name + "_drop_" + fabric_mod_port + "(struct se_wwn *wwn)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = container_of(wwn,\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + ", " + fabric_mod_port + "_wwn);\n"
buf += " kfree(" + fabric_mod_port + ");\n"
buf += "}\n\n"
buf += "static ssize_t " + fabric_mod_name + "_wwn_show_attr_version(\n"
buf += " struct target_fabric_configfs *tf,\n"
buf += " char *page)\n"
buf += "{\n"
buf += " return sprintf(page, \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \"on \"UTS_RELEASE\"\\n\", " + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += "}\n\n"
buf += "TF_WWN_ATTR_RO(" + fabric_mod_name + ", version);\n\n"
buf += "static struct configfs_attribute *" + fabric_mod_name + "_wwn_attrs[] = {\n"
buf += " &" + fabric_mod_name + "_wwn_version.attr,\n"
buf += " NULL,\n"
buf += "};\n\n"
buf += "static struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
buf += " .get_fabric_proto_ident = " + fabric_mod_name + "_get_fabric_proto_ident,\n"
buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
buf += " .tpg_get_default_depth = " + fabric_mod_name + "_get_default_depth,\n"
buf += " .tpg_get_pr_transport_id = " + fabric_mod_name + "_get_pr_transport_id,\n"
buf += " .tpg_get_pr_transport_id_len = " + fabric_mod_name + "_get_pr_transport_id_len,\n"
buf += " .tpg_parse_pr_out_transport_id = " + fabric_mod_name + "_parse_pr_out_transport_id,\n"
buf += " .tpg_check_demo_mode = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_check_demo_mode_cache = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_demo_mode_write_protect = " + fabric_mod_name + "_check_true,\n"
buf += " .tpg_check_prod_mode_write_protect = " + fabric_mod_name + "_check_false,\n"
buf += " .tpg_alloc_fabric_acl = " + fabric_mod_name + "_alloc_fabric_acl,\n"
buf += " .tpg_release_fabric_acl = " + fabric_mod_name + "_release_fabric_acl,\n"
buf += " .tpg_get_inst_index = " + fabric_mod_name + "_tpg_get_inst_index,\n"
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
buf += " .write_pending_status = " + fabric_mod_name + "_write_pending_status,\n"
buf += " .set_default_node_attributes = " + fabric_mod_name + "_set_default_node_attrs,\n"
buf += " .get_task_tag = " + fabric_mod_name + "_get_task_tag,\n"
buf += " .get_cmd_state = " + fabric_mod_name + "_get_cmd_state,\n"
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
buf += " .get_fabric_sense_len = " + fabric_mod_name + "_get_fabric_sense_len,\n"
buf += " .set_fabric_sense_len = " + fabric_mod_name + "_set_fabric_sense_len,\n"
buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
buf += " .fabric_make_wwn = " + fabric_mod_name + "_make_" + fabric_mod_port + ",\n"
buf += " .fabric_drop_wwn = " + fabric_mod_name + "_drop_" + fabric_mod_port + ",\n"
buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
buf += " .fabric_post_link = NULL,\n"
buf += " .fabric_pre_unlink = NULL,\n"
buf += " .fabric_make_np = NULL,\n"
buf += " .fabric_drop_np = NULL,\n"
buf += " .fabric_make_nodeacl = " + fabric_mod_name + "_make_nodeacl,\n"
buf += " .fabric_drop_nodeacl = " + fabric_mod_name + "_drop_nodeacl,\n"
buf += "};\n\n"
buf += "static int " + fabric_mod_name + "_register_configfs(void)\n"
buf += "{\n"
buf += " struct target_fabric_configfs *fabric;\n"
buf += " int ret;\n\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + " fabric module %s on %s/%s\"\n"
buf += " \" on \"UTS_RELEASE\"\\n\"," + fabric_mod_name.upper() + "_VERSION, utsname()->sysname,\n"
buf += " utsname()->machine);\n"
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup fabric->tf_ops from our local " + fabric_mod_name + "_ops\n"
buf += " */\n"
buf += " fabric->tf_ops = " + fabric_mod_name + "_ops;\n"
buf += " /*\n"
buf += " * Setup default attribute lists for various fabric->tf_cit_tmpl\n"
buf += " */\n"
buf += " TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = " + fabric_mod_name + "_wwn_attrs;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;\n"
buf += " TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;\n"
buf += " /*\n"
buf += " * Register the fabric for use within TCM\n"
buf += " */\n"
buf += " ret = target_fabric_configfs_register(fabric);\n"
buf += " if (ret < 0) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_register() failed\"\n"
buf += " \" for " + fabric_mod_name.upper() + "\\n\");\n"
buf += " return ret;\n"
buf += " }\n"
buf += " /*\n"
buf += " * Setup our local pointer to *fabric\n"
buf += " */\n"
buf += " " + fabric_mod_name + "_fabric_configfs = fabric;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Set fabric -> " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_deregister_configfs(void)\n"
buf += "{\n"
buf += " if (!" + fabric_mod_name + "_fabric_configfs)\n"
buf += " return;\n\n"
buf += " target_fabric_configfs_deregister(" + fabric_mod_name + "_fabric_configfs);\n"
buf += " " + fabric_mod_name + "_fabric_configfs = NULL;\n"
buf += " printk(KERN_INFO \"" + fabric_mod_name.upper() + "[0] - Cleared " + fabric_mod_name + "_fabric_configfs\\n\");\n"
buf += "};\n\n"
buf += "static int __init " + fabric_mod_name + "_init(void)\n"
buf += "{\n"
buf += " int ret;\n\n"
buf += " ret = " + fabric_mod_name + "_register_configfs();\n"
buf += " if (ret < 0)\n"
buf += " return ret;\n\n"
buf += " return 0;\n"
buf += "};\n\n"
buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
buf += "{\n"
buf += " " + fabric_mod_name + "_deregister_configfs();\n"
buf += "};\n\n"
buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
buf += "MODULE_LICENSE(\"GPL\");\n"
buf += "module_init(" + fabric_mod_name + "_init);\n"
buf += "module_exit(" + fabric_mod_name + "_exit);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_scan_fabric_ops(tcm_dir):
fabric_ops_api = tcm_dir + "include/target/target_core_fabric.h"
print "Using tcm_mod_scan_fabric_ops: " + fabric_ops_api
process_fo = 0
p = open(fabric_ops_api, 'r')
line = p.readline()
while line:
if process_fo == 0 and re.search('struct target_core_fabric_ops {', line):
line = p.readline()
continue
if process_fo == 0:
process_fo = 1
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
continue
line = p.readline()
# Search for function pointer
if not re.search('\(\*', line):
continue
fabric_ops.append(line.rstrip())
p.close()
return
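# Illustrative note added for clarity (not part of the original kernel script): the
# loop above collects raw function-pointer lines from struct target_core_fabric_ops,
# so a header line such as
#
#     int (*shutdown_session)(struct se_session *);
#
# would end up verbatim in the global fabric_ops list and later be matched by the
# re.search() calls in tcm_mod_dump_fabric_ops() below. The exact set of lines
# depends on the kernel tree being scanned; the line above is a hypothetical example.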
def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf = ""
bufi = ""
f = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.c"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
fi = fabric_mod_dir_var + "/" + fabric_mod_name + "_fabric.h"
print "Writing file: " + fi
pi = open(fi, 'w')
if not pi:
tcm_mod_err("Unable to open file: " + fi)
buf = "#include <linux/slab.h>\n"
buf += "#include <linux/kthread.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/list.h>\n"
buf += "#include <linux/types.h>\n"
buf += "#include <linux/string.h>\n"
buf += "#include <linux/ctype.h>\n"
buf += "#include <asm/unaligned.h>\n"
buf += "#include <scsi/scsi.h>\n"
buf += "#include <scsi/scsi_host.h>\n"
buf += "#include <scsi/scsi_device.h>\n"
buf += "#include <scsi/scsi_cmnd.h>\n"
buf += "#include <scsi/libfc.h>\n\n"
buf += "#include <target/target_core_base.h>\n"
buf += "#include <target/target_core_fabric.h>\n"
buf += "#include <target/target_core_configfs.h>\n\n"
buf += "#include \"" + fabric_mod_name + "_base.h\"\n"
buf += "#include \"" + fabric_mod_name + "_fabric.h\"\n\n"
buf += "int " + fabric_mod_name + "_check_true(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_true(struct se_portal_group *);\n"
buf += "int " + fabric_mod_name + "_check_false(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_check_false(struct se_portal_group *);\n"
total_fabric_ops = len(fabric_ops)
i = 0
while i < total_fabric_ops:
fo = fabric_ops[i]
i += 1
# print "fabric_ops: " + fo
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
buf += " return \"" + fabric_mod_name[4:] + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
if re.search('get_fabric_proto_ident', fo):
buf += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " u8 proto_id;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " proto_id = fc_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " proto_id = sas_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " proto_id = iscsi_get_fabric_proto_ident(se_tpg);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return proto_id;\n"
buf += "}\n\n"
bufi += "u8 " + fabric_mod_name + "_get_fabric_proto_ident(struct se_portal_group *);\n"
if re.search('get_wwn', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n\n"
buf += " return &" + fabric_mod_port + "->" + fabric_mod_port + "_name[0];\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_wwn(struct se_portal_group *);\n"
if re.search('get_tag', fo):
buf += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " return tpg->" + fabric_mod_port + "_tpgt;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_tag(struct se_portal_group *);\n"
if re.search('get_default_depth', fo):
buf += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_default_depth(struct se_portal_group *);\n"
if re.search('get_pr_transport_id\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code,\n"
buf += " unsigned char *buf)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code, buf);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *, unsigned char *);\n"
if re.search('get_pr_transport_id_len\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl,\n"
buf += " struct t10_pr_registration *pr_reg,\n"
buf += " int *format_code)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " int ret = 0;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " ret = sas_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " ret = iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,\n"
buf += " format_code);\n"
buf += " break;\n"
buf += " }\n\n"
buf += " return ret;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_pr_transport_id_len(struct se_portal_group *,\n"
bufi += " struct se_node_acl *, struct t10_pr_registration *,\n"
bufi += " int *);\n"
if re.search('parse_pr_out_transport_id\)\(', fo):
buf += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " const char *buf,\n"
buf += " u32 *out_tid_len,\n"
buf += " char **port_nexus_ptr)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_tpg *tpg = container_of(se_tpg,\n"
buf += " struct " + fabric_mod_name + "_tpg, se_tpg);\n"
buf += " struct " + fabric_mod_name + "_" + fabric_mod_port + " *" + fabric_mod_port + " = tpg->" + fabric_mod_port + ";\n"
buf += " char *tid = NULL;\n\n"
buf += " switch (" + fabric_mod_port + "->" + fabric_mod_port + "_proto_id) {\n"
if proto_ident == "FC":
buf += " case SCSI_PROTOCOL_FCP:\n"
buf += " default:\n"
buf += " tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "SAS":
buf += " case SCSI_PROTOCOL_SAS:\n"
buf += " default:\n"
buf += " tid = sas_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
elif proto_ident == "iSCSI":
buf += " case SCSI_PROTOCOL_ISCSI:\n"
buf += " default:\n"
buf += " tid = iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,\n"
buf += " port_nexus_ptr);\n"
buf += " }\n\n"
buf += " return tid;\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_parse_pr_out_transport_id(struct se_portal_group *,\n"
bufi += " const char *, u32 *, char **);\n"
if re.search('alloc_fabric_acl\)\(', fo):
buf += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl;\n\n"
buf += " nacl = kzalloc(sizeof(struct " + fabric_mod_name + "_nacl), GFP_KERNEL);\n"
buf += " if (!nacl) {\n"
buf += " printk(KERN_ERR \"Unable to allocate struct " + fabric_mod_name + "_nacl\\n\");\n"
buf += " return NULL;\n"
buf += " }\n\n"
buf += " return &nacl->se_node_acl;\n"
buf += "}\n\n"
bufi += "struct se_node_acl *" + fabric_mod_name + "_alloc_fabric_acl(struct se_portal_group *);\n"
if re.search('release_fabric_acl\)\(', fo):
buf += "void " + fabric_mod_name + "_release_fabric_acl(\n"
buf += " struct se_portal_group *se_tpg,\n"
buf += " struct se_node_acl *se_nacl)\n"
buf += "{\n"
buf += " struct " + fabric_mod_name + "_nacl *nacl = container_of(se_nacl,\n"
buf += " struct " + fabric_mod_name + "_nacl, se_node_acl);\n"
buf += " kfree(nacl);\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_fabric_acl(struct se_portal_group *,\n"
bufi += " struct se_node_acl *);\n"
if re.search('tpg_get_inst_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *se_tpg)\n"
buf += "{\n"
buf += " return 1;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_tpg_get_inst_index(struct se_portal_group *);\n"
if re.search('\*release_cmd\)\(', fo):
buf += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_release_cmd(struct se_cmd *);\n"
if re.search('shutdown_session\)\(', fo):
buf += "int " + fabric_mod_name + "_shutdown_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_shutdown_session(struct se_session *);\n"
if re.search('close_session\)\(', fo):
buf += "void " + fabric_mod_name + "_close_session(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
if re.search('stop_session\)\(', fo):
buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
if re.search('fall_back_to_erl0\)\(', fo):
buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
if re.search('sess_logged_in\)\(', fo):
buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *);\n"
if re.search('write_pending\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending(struct se_cmd *);\n"
if re.search('write_pending_status\)\(', fo):
buf += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_write_pending_status(struct se_cmd *);\n"
if re.search('set_default_node_attributes\)\(', fo):
buf += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *nacl)\n"
buf += "{\n"
buf += " return;\n"
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_set_default_node_attrs(struct se_node_acl *);\n"
if re.search('get_task_tag\)\(', fo):
buf += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u32 " + fabric_mod_name + "_get_task_tag(struct se_cmd *);\n"
if re.search('get_cmd_state\)\(', fo):
buf += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_get_cmd_state(struct se_cmd *);\n"
if re.search('queue_data_in\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_data_in(struct se_cmd *);\n"
if re.search('queue_status\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_status(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
if re.search('get_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_get_fabric_sense_len(void);\n"
if re.search('set_fabric_sense_len\)\(', fo):
buf += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *se_cmd, u32 sense_length)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "u16 " + fabric_mod_name + "_set_fabric_sense_len(struct se_cmd *, u32);\n"
if re.search('is_state_remove\)\(', fo):
buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
buf += "{\n"
buf += " return 0;\n"
buf += "}\n\n"
bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
ret = pi.write(bufi)
if ret:
tcm_mod_err("Unable to write fi: " + fi)
pi.close()
return
def tcm_mod_build_kbuild(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Makefile"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf += fabric_mod_name + "-objs := " + fabric_mod_name + "_fabric.o \\\n"
buf += " " + fabric_mod_name + "_configfs.o\n"
buf += "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name + ".o\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_build_kconfig(fabric_mod_dir_var, fabric_mod_name):
buf = ""
f = fabric_mod_dir_var + "/Kconfig"
print "Writing file: " + f
p = open(f, 'w')
if not p:
tcm_mod_err("Unable to open file: " + f)
buf = "config " + fabric_mod_name.upper() + "\n"
buf += " tristate \"" + fabric_mod_name.upper() + " fabric module\"\n"
buf += " depends on TARGET_CORE && CONFIGFS_FS\n"
buf += " default n\n"
buf += " ---help---\n"
buf += " Say Y here to enable the " + fabric_mod_name.upper() + " fabric module\n"
ret = p.write(buf)
if ret:
tcm_mod_err("Unable to write f: " + f)
p.close()
return
def tcm_mod_add_kbuild(tcm_dir, fabric_mod_name):
buf = "obj-$(CONFIG_" + fabric_mod_name.upper() + ") += " + fabric_mod_name.lower() + "/\n"
kbuild = tcm_dir + "/drivers/target/Makefile"
f = open(kbuild, 'a')
f.write(buf)
f.close()
return
def tcm_mod_add_kconfig(tcm_dir, fabric_mod_name):
buf = "source \"drivers/target/" + fabric_mod_name.lower() + "/Kconfig\"\n"
kconfig = tcm_dir + "/drivers/target/Kconfig"
f = open(kconfig, 'a')
f.write(buf)
f.close()
return
def main(modname, proto_ident):
# proto_ident = "FC"
# proto_ident = "SAS"
# proto_ident = "iSCSI"
tcm_dir = os.getcwd()
tcm_dir += "/../../"
print "tcm_dir: " + tcm_dir
fabric_mod_name = modname
fabric_mod_dir = tcm_dir + "drivers/target/" + fabric_mod_name
print "Set fabric_mod_name: " + fabric_mod_name
print "Set fabric_mod_dir: " + fabric_mod_dir
print "Using proto_ident: " + proto_ident
if proto_ident != "FC" and proto_ident != "SAS" and proto_ident != "iSCSI":
print "Unsupported proto_ident: " + proto_ident
sys.exit(1)
ret = tcm_mod_create_module_subdir(fabric_mod_dir)
if ret:
print "tcm_mod_create_module_subdir() failed because module already exists!"
sys.exit(1)
tcm_mod_build_base_includes(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_scan_fabric_ops(tcm_dir)
tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_configfs(proto_ident, fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
return
parser = optparse.OptionParser()
parser.add_option('-m', '--modulename', help='Module name', dest='modname',
action='store', nargs=1, type='string')
parser.add_option('-p', '--protoident', help='Protocol Ident', dest='protoident',
action='store', nargs=1, type='string')
(opts, args) = parser.parse_args()
mandatories = ['modname', 'protoident']
for m in mandatories:
if not opts.__dict__[m]:
print "mandatory option is missing\n"
parser.print_help()
exit(-1)
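# Hypothetical example invocation, added for illustration only (the module name is
# an assumption, not taken from this script):
#
#     ./tcm_mod_builder.py -m tcm_nab5000 -p FC
#
# would create drivers/target/tcm_nab5000/ and populate it with the *_base.h,
# *_fabric.c/h, *_configfs.c, Makefile and Kconfig skeletons generated by the
# functions above, then offer to append the module to drivers/target/Makefile and
# Kconfig.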
if __name__ == "__main__":
main(str(opts.modname), opts.protoident)
| gpl-2.0 |
verma-varsha/zulip | zerver/tests/test_type_debug.py | 1 | 5027 | from __future__ import print_function
import sys
from unittest import TestCase
from six.moves import cStringIO as StringIO
from zerver.lib.type_debug import print_types
from typing import Any, Callable, Dict, Iterable, Tuple, TypeVar
T = TypeVar('T')
def add(x=0, y=0):
# type: (Any, Any) -> Any
return x + y
def to_dict(v=[]):
# type: (Iterable[Tuple[Any, Any]]) -> Dict[Any, Any]
return dict(v)
class TypesPrintTest(TestCase):
# These 2 methods are needed to run tests with our custom test-runner
def _pre_setup(self):
# type: () -> None
pass
def _post_teardown(self):
# type: () -> None
pass
def check_signature(self, signature, retval, func, *args, **kwargs):
# type: (str, T, Callable[..., T], *Any, **Any) -> None
"""
Checks if print_types outputs `signature` when func is called with *args and **kwargs.
Do not decorate func with print_types before passing into this function.
func will be decorated with print_types within this function.
"""
try:
original_stdout = sys.stdout
sys.stdout = StringIO()
self.assertEqual(retval, print_types(func)(*args, **kwargs))
self.assertEqual(sys.stdout.getvalue().strip(), signature)
finally:
sys.stdout = original_stdout
def test_empty(self):
# type: () -> None
def empty_func():
# type: () -> None
pass
self.check_signature("empty_func() -> None", None, empty_func)
self.check_signature("<lambda>() -> None", None, (lambda: None)) # type: ignore # https://github.com/python/mypy/issues/1932
def test_basic(self):
# type: () -> None
self.check_signature("add(float, int) -> float",
5.0, add, 2.0, 3)
self.check_signature("add(float, y=int) -> float",
5.0, add, 2.0, y=3)
self.check_signature("add(x=int) -> int", 2, add, x=2)
self.check_signature("add() -> int", 0, add)
def test_list(self):
# type: () -> None
self.check_signature("add([], [str]) -> [str]",
['two'], add, [], ['two'])
self.check_signature("add([int], [str]) -> [int, ...]",
[2, 'two'], add, [2], ['two'])
self.check_signature("add([int, ...], y=[]) -> [int, ...]",
[2, 'two'], add, [2, 'two'], y=[])
def test_dict(self):
# type: () -> None
self.check_signature("to_dict() -> {}", {}, to_dict)
self.check_signature("to_dict([(int, str)]) -> {int: str}",
{2: 'two'}, to_dict, [(2, 'two')])
self.check_signature("to_dict(((int, str),)) -> {int: str}",
{2: 'two'}, to_dict, ((2, 'two'),))
self.check_signature("to_dict([(int, str), ...]) -> {int: str, ...}",
{1: 'one', 2: 'two'}, to_dict, [(1, 'one'), (2, 'two')])
def test_tuple(self):
# type: () -> None
self.check_signature("add((), ()) -> ()",
(), add, (), ())
self.check_signature("add((int,), (str,)) -> (int, str)",
(1, 'one'), add, (1,), ('one',))
self.check_signature("add(((),), ((),)) -> ((), ())",
((), ()), add, ((),), ((),))
def test_class(self):
# type: () -> None
class A(object):
pass
class B(str):
pass
self.check_signature("<lambda>(A) -> str", 'A', (lambda x: x.__class__.__name__), A())
self.check_signature("<lambda>(B) -> int", 5, (lambda x: len(x)), B("hello"))
def test_sequence(self):
# type: () -> None
class A(list):
pass
class B(list):
pass
self.check_signature("add(A([]), B([str])) -> [str]",
['two'], add, A([]), B(['two']))
self.check_signature("add(A([int]), B([str])) -> [int, ...]",
[2, 'two'], add, A([2]), B(['two']))
self.check_signature("add(A([int, ...]), y=B([])) -> [int, ...]",
[2, 'two'], add, A([2, 'two']), y=B([]))
def test_mapping(self):
# type: () -> None
class A(dict):
pass
def to_A(v=[]):
# type: (Iterable[Tuple[Any, Any]]) -> A
return A(v)
self.check_signature("to_A() -> A([])", A(()), to_A)
self.check_signature("to_A([(int, str)]) -> A([(int, str)])",
{2: 'two'}, to_A, [(2, 'two')])
self.check_signature("to_A([(int, str), ...]) -> A([(int, str), ...])",
{1: 'one', 2: 'two'}, to_A, [(1, 'one'), (2, 'two')])
self.check_signature("to_A(((int, str), (int, str))) -> A([(int, str), ...])",
{1: 'one', 2: 'two'}, to_A, ((1, 'one'), (2, 'two')))
| apache-2.0 |
aravindvenkatesan/AgroLD-scripts | AgroLD_ETL/riceKBpipeline.py | 1 | 10862 | #!/usr/bin/env python
import glob
#from Bio.UniProt import GOA
import pprint
#from riceKB import * #southGreenParsers #oryzaBaseParser gafToRDF grameneParsers TairParser grameneParsers
#import riceKB
import os
import re
from riceKB import gafToRDF, grameneParsers, oryzaBaseParser, TairParser,\
southGreenParsers, uniprotToRDF, TropgeneParser, TropgeneModel, gffParser, os_japonicaModel, \
os_indicaModel, a_thalianaModel, sniplayPaserModel
#from riceKB.oryzaBaseParser import oryzaBaseParser
'''
Input directory path
'''
'''
Ontology association directory/files
'''
eco_map_file = '/home/venkatesan/workspace/explore/test_files/ontology_associations/gaf-eco-mapping.txt'
prot_assoc_test_dir = '/home/venkatesan/workspace/explore/test_files/ontology_associations/protein_associations/*.*'
gene_assoc_test_dir = '/home/venkatesan/workspace/explore/test_files/ontology_associations/gene_associations/*.*'
qtl_assoc_test_dir = '/home/venkatesan/workspace/explore/test_files/ontology_associations/qtl_associations/*.*'
#aracyc_file = '/home/venkatesan/workspace/explore/test_files/tair/aracyc/aracyc_pathways.20130709.txt'
protein_assoc_dir = '/media/sf_F_DRIVE/IBC/argoLD_project/data/ontology_associations/protein/*.*' # /home/venkatesan/workspace/argoLD_project/data/ontology_associations/protein
gene_assoc_dir = '/home/venkatesan/workspace/argoLD_project/data/ontology_associations/gene/*.*'
qtl_assoc_dir = '/media/sf_F_DRIVE/IBC/argoLD_project/data/ontology_associations/qtl/*.*'
'''
Gramene genes and QTL directory/files
'''
gramene_g_test_dir = '/home/venkatesan/workspace/explore/test_files/gramene_genes/*.txt' #*.txt Oryza_sativa_japonica.txt' #meridionalis barthii sativa_indica sativa_japonica
gramene_qtl_test_file = '/home/venkatesan/workspace/explore/test_files/gramene_qtl/Rice_QTL.dat'
gramene_genes_files = '/media/sf_F_DRIVE/IBC/argoLD_project/data/gramene_genes/*.txt'
gramene_qtl_file = '/media/sf_F_DRIVE/IBC/argoLD_project/data/gramene_qtl/Rice_QTL.dat'
'''
OryzaBaseDB
'''
oryzabase_test_file = '/home/venkatesan/workspace/explore/test_files/oryzabase_genes/OryzabaseGeneListEn_20140621.txt'
oryzabase_file = '/home/venkatesan/workspace/argoLD_project/data/oryzabase/OryzabaseGeneListEn_20141206.txt'
'''
AraCyc
'''
aracyc_test_file = '/home/venkatesan/workspace/explore/test_files/tair/aracyc/aracyc_pathways.20130709.txt'
aracyc_file = '/home/venkatesan/workspace/argoLD_project/data/tair/aracyc/aracyc_pathways.20130709'
'''
RiceCyc
'''
ricecyc_test_dir = '/home/venkatesan/workspace/explore/test_files/gramene_ricecyc/*.tab'
ricecyc_file = '/media/sf_F_DRIVE/IBC/argoLD_project/data/ricecyc/*.tab'
'''
OryzaTagLine input
'''
otl_test_inputfile = '/home/venkatesan/workspace/explore/test_files/oryzaTagLine/OTL_export_pheno+trait.csv'
otl_inputfile = '/media/sf_F_DRIVE/IBC/argoLD_project/data/southgreen/oryzatagline/OTL_export_pheno+trait.csv'
'''
UniProt input
'''
up_test_dir = '/home/venkatesan/workspace/explore/test_files/uniprot/*.dat'
up_dir = '/media/sf_F_DRIVE/IBC/argoLD_project/data/uniport/*.dat'
#########################################################
'''
Output file path - RDF files - turtle syntax
'''
'''
Ontology association files
'''
prot_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/associations_ttl/protein_associations.ttl'
gene_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/associations_ttl/gene_associations.ttl'
qtl_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/associations_ttl/qtl_associations.ttl'
protein_assoc_ttl = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/ontology_associations_ttl/protein_ttl/protein_associations.ttl'
gene_assoc_ttl = '/home/venkatesan/workspace/argoLD_project/rdf/ontology_associations_ttl/gene_ttl/gene_associations.ttl'
qtl_assoc_ttl = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/ontology_associations_ttl/qtl_ttl/qtl_associations.ttl'
'''
Gramene genes/QTL files
'''
genomes_rdf_test_out = '/home/venkatesan/workspace/explore/rdf_ttl/gramene_genome_ttl/'
gramene_qtl_test_out = '/home/venkatesan/workspace/explore/rdf_ttl/gramene_qtl_ttl/'
gramene_genes_out = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/gramene_genes_ttl/'
gramene_qtl_out = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/gramene_qtl_ttl/'
'''
OryzaBaseDB file
'''
oryzaBase_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/oryzabase_ttl/oryzabase_genes.ttl'
oryzaBase_output = '/home/venkatesan/workspace/argoLD_project/rdf/oryzabase_ttl/oryzabase_genes.ttl'
#oryzaBase_output = '/home/venkatesan/Documents/data_samples/oryzabase/oryzabase_genes.ttl'
'''
AraCyc
'''
aracyc_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/tair/aracyc/'
aracyc_output = '/home/venkatesan/workspace/argoLD_project/rdf/tair_ttl/aracyc_ttl/'
'''
RiceCyc
'''
ricecyc_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/gramene_ricecyc_ttl/'
ricecyc_output = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/cyc_ttl/'
'''
OryzaTagLine
'''
otl_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/southgreen/oryzaTagLine/otl.ttl'
otl_output = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/southgreen/oryzatagline_ttl/otl.ttl'
'''
UniprotKB
'''
up_test_output = '/home/venkatesan/workspace/explore/rdf_ttl/uniprot_ttl/'
up_output = '/media/sf_F_DRIVE/IBC/argoLD_project/rdf/uniprot_ttl/'
pp = pprint.PrettyPrinter(indent=4)
#prot_gaf_files = glob.glob(protein_assoc_dir) #gene_assoc_dir stores file names(with the full path) as a list
#prot_gaf_files = glob.glob(prot_assoc_test_dir) #gene_assoc_dir stores file names(with the full path) as a list
#gene_gaf_files = glob.glob(gene_assoc_dir)
#gene_gaf_files = glob.glob(gene_assoc_input_dir)
#qtl_gaf_files = glob.glob(qtl_assoc_dir)
#goa = gafToRDF
#print "************** Protein-ontology associations *************\n"
#mapping = goa.gafEcoMap(eco_map_file)
#goa.allGafRDF(prot_gaf_files, prot_output_file, 'qtl') #
#goa.ProteinGafRDF(prot_gaf_files, mapping, protein_assoc_ttl) # allGafRDF(prot_gaf_files, protein_assoc_ttl, 'protein')
#print "************** Protein-ontology associations RDF converted *************\n\n"
#print "************** QTL-ontology associations *************\n"
#goa.allGafRDF(qtl_gaf_files, mapping, qtl_assoc_ttl, 'qtl')
#print "************** QTL-ontology associations RDF converted *************\n\n"
#gaf_eco_file = '/home/venkatesan/workspace/explore/test_files/ontology_associations/gaf-eco-mapping.txt'
#goa = gafToRDF
#ds = goa.gafEcoMap(gaf_eco_file)
#pp.pprint(mapping)
#gramene_genomes = glob.glob(gramene_genes_files) #gramene_genes_files
#g_parse = grameneParsers#oryzaBaseParser
#print "***************** Gramene Genes data ********************\n"
#g_parse = grameneParsers
#input_f = '/home/venkatesan/workspace/explore/test_files/gramene_genes/Oryza_brachyantha.txt' #Oryza_barthii.txt' Oryza_sativa_japonica
#geneHash = g_parse.geneParser(gramene_genomes)#grameneQTLRDF(gramene_qtl_dir, gramene_qtl_out) oryzaBaseRDF(oryzabase_file, oryzaBase_output) grameneGeneRDF(gramene_genomes, gramene_genes_out)
#pp.pprint(geneHash)
#g_parse.grameneGeneRDF(gramene_genomes, gramene_genes_out)
#print "********************************************************\n\n"
#print "*************** Gramene QTL data ***************\n"
#g_parse.grameneQTLRDF(gramene_qtl_file, gramene_qtl_out) #gramene_qtl_file, gramene_qtl_out gramene_qtl_test_file, gramene_qtl_test_out
#print "***********************************************\n\n"
#print "*********** Cyc data *************\n"
#pw_files = glob.glob(ricecyc_file)
#ricecyc_ds = g_parse.CycParser(pw_files) # ricecyc_test_file ricecyc_file
#pp.pprint(ricecyc_ds)
#g_parse.CycRDF(ricecyc_ds, ricecyc_output) #ricecyc_test_output ricecyc_output
#print "*************************************\n\n"
#print "************ OryzaTagLine data **************\n"
#southG = southGreenParsers
#oryzaTag_ds = southG.otlParser(otl_inputfile) #otl_test_inputfile otl_inputfile
#pp.pprint(oryzaTag_ds)
#southG.otlRDF(oryzaTag_ds, otl_output)#otl_test_output otl_output
#print "********************************************\n\n"
# UniProt ********
#up_files = glob.glob(up_dir)#up_dir up_test_dir
#print "*********** Uniprot data *************\n"
#up_converter = uniprotToRDF
#up_converter.upToRDF(up_files, up_output)#up_output up_test_output
#print "********************************************\n\n"
##print "************ TropGene data **************\n"
#path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/tropGene/rice.csv' # The input
#path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/tropGene_ttl/tropgene.rice.ttl' # The output
#ds = TropgeneModel.tropGeneParser(path)  # Parse the input file with tropGeneParser()
#pp.pprint(ds)  # Print the parsed data structure to the terminal
#TropgeneModel.tropGeneToRDF(ds, path_output)  # The transformation function tropGeneToRDF(input, output)
##print "************ OS.Japonica data orygene_db **************\n"
#path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/orygenes_db/os.japonicaCancat_test.gff3' # The input
#path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/orygene_db_ttl/os.japonica.ttl' # The output
#ds = gffParser.parseGFF3(path)  # Parse the GFF3 file with parseGFF3()
#pp.pprint(ds)  # Print the parsed data structure to the terminal
#os_japonicaModel.os_indicaModeleRDF(ds, path_output)
##print "************ OS.Indica data orygene_db **************\n"
#path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/orygenes_db/os.indicaCancat_Test.gff3' # The input
#path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/orygene_db_ttl/os.indica.ttl' # The output
#ds = gffParser.parseGFF3(path)  # Parse the GFF3 file with parseGFF3()
#pp.pprint(ds)  # Print the parsed data structure to the terminal
#os_indicaModel.os_indicaModeleRDF(ds, path_output)
##print "************ A.thaliana data orygene_db **************\n"
#path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/orygenes_db/a.thalianaCancat_test.gff3' # The input
#path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/orygene_db_ttl/a.thaliana.ttl' # The output
#ds = gffParser.parseGFF3(path)
#pp.pprint(ds)  # Print the parsed data structure to the terminal
#a_thalianaModel.a_thalianaModeleRDF(ds, path_output)
##print "************ Sniplay datasniplay_db **************\n"
#path = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/test_files/sniplay_db/sniplay_test.txt'
#path_output = '/media/elhassouni/donnees/Noeud-plante-projet/workspace/AgroLD/AgroLD_ETL/rdf_ttl/sniplay_db_ttl/sniplay.ttl' # The output
sniplayPaserModel.sniplayPaserModel()
| cc0-1.0 |
MTASZTAKI/ApertusVR | plugins/languageAPI/jsAPI/3rdParty/nodejs/10.1.0/source/tools/gyp/pylib/gyp/MSVSNew.py | 1835 | 12124 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""New implementation of Visual Studio project generation."""
import os
import random
import gyp.common
# hashlib is supplied as of Python 2.5 as the replacement interface for md5
# and other secure hashes. In 2.6, md5 is deprecated. Import hashlib if
# available, avoiding a deprecation warning under 2.6. Import md5 otherwise,
# preserving 2.4 compatibility.
try:
import hashlib
_new_md5 = hashlib.md5
except ImportError:
import md5
_new_md5 = md5.new
# Initialize random number generator
random.seed()
# GUIDs for project types
ENTRY_TYPE_GUIDS = {
'project': '{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}',
'folder': '{2150E333-8FDC-42A3-9474-1A3956D46DE8}',
}
#------------------------------------------------------------------------------
# Helper functions
def MakeGuid(name, seed='msvs_new'):
"""Returns a GUID for the specified target name.
Args:
name: Target name.
seed: Seed for MD5 hash.
Returns:
A GUID-line string calculated from the name and seed.
This generates something which looks like a GUID, but depends only on the
name and seed. This means the same name/seed will always generate the same
GUID, so that projects and solutions which refer to each other can determine
the GUID to refer to explicitly. It also means that the GUID will
not change when the project for a target is rebuilt.
"""
# Calculate a MD5 signature for the seed and name.
d = _new_md5(str(seed) + str(name)).hexdigest().upper()
# Convert most of the signature to GUID form (discard the rest)
guid = ('{' + d[:8] + '-' + d[8:12] + '-' + d[12:16] + '-' + d[16:20]
+ '-' + d[20:32] + '}')
return guid
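# Minimal usage sketch, added for illustration; the target names below are
# hypothetical and not part of the gyp sources. Because the GUID is derived from an
# MD5 digest of seed + name, repeated calls are deterministic:
#
#     assert MakeGuid('base_unittests') == MakeGuid('base_unittests')
#     assert MakeGuid('base_unittests') != MakeGuid('base_unittests', seed='other')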
#------------------------------------------------------------------------------
class MSVSSolutionEntry(object):
def __cmp__(self, other):
# Sort by name then guid (so things are in order on vs2008).
return cmp((self.name, self.get_guid()), (other.name, other.get_guid()))
class MSVSFolder(MSVSSolutionEntry):
"""Folder in a Visual Studio project or solution."""
def __init__(self, path, name = None, entries = None,
guid = None, items = None):
"""Initializes the folder.
Args:
path: Full path to the folder.
name: Name of the folder.
entries: List of folder entries to nest inside this folder. May contain
Folder or Project objects. May be None, if the folder is empty.
guid: GUID to use for folder, if not None.
items: List of solution items to include in the folder project. May be
None, if the folder does not directly contain items.
"""
if name:
self.name = name
else:
# Use last layer.
self.name = os.path.basename(path)
self.path = path
self.guid = guid
# Copy passed lists (or set to empty lists)
self.entries = sorted(list(entries or []))
self.items = list(items or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['folder']
def get_guid(self):
if self.guid is None:
# Use consistent guids for folders (so things don't regenerate).
self.guid = MakeGuid(self.path, seed='msvs_folder')
return self.guid
#------------------------------------------------------------------------------
class MSVSProject(MSVSSolutionEntry):
"""Visual Studio project."""
def __init__(self, path, name = None, dependencies = None, guid = None,
spec = None, build_file = None, config_platform_overrides = None,
fixpath_prefix = None):
"""Initializes the project.
Args:
path: Absolute path to the project file.
name: Name of project. If None, the name will be the same as the base
name of the project file.
dependencies: List of other Project objects this project is dependent
upon, if not None.
guid: GUID to use for project, if not None.
spec: Dictionary specifying how to build this project.
build_file: Filename of the .gyp file that the vcproj file comes from.
config_platform_overrides: optional dict of configuration platforms to
used in place of the default for this target.
fixpath_prefix: the path used to adjust the behavior of _fixpath
"""
self.path = path
self.guid = guid
self.spec = spec
self.build_file = build_file
# Use project filename if name not specified
self.name = name or os.path.splitext(os.path.basename(path))[0]
# Copy passed lists (or set to empty lists)
self.dependencies = list(dependencies or [])
self.entry_type_guid = ENTRY_TYPE_GUIDS['project']
if config_platform_overrides:
self.config_platform_overrides = config_platform_overrides
else:
self.config_platform_overrides = {}
self.fixpath_prefix = fixpath_prefix
self.msbuild_toolset = None
def set_dependencies(self, dependencies):
self.dependencies = list(dependencies or [])
def get_guid(self):
if self.guid is None:
# Set GUID from path
# TODO(rspangler): This is fragile.
# 1. We can't just use the project filename sans path, since there could
# be multiple projects with the same base name (for example,
# foo/unittest.vcproj and bar/unittest.vcproj).
# 2. The path needs to be relative to $SOURCE_ROOT, so that the project
# GUID is the same whether it's included from base/base.sln or
# foo/bar/baz/baz.sln.
# 3. The GUID needs to be the same each time this builder is invoked, so
# that we don't need to rebuild the solution when the project changes.
# 4. We should be able to handle pre-built project files by reading the
# GUID from the files.
self.guid = MakeGuid(self.name)
return self.guid
def set_msbuild_toolset(self, msbuild_toolset):
self.msbuild_toolset = msbuild_toolset
#------------------------------------------------------------------------------
class MSVSSolution(object):
"""Visual Studio solution."""
def __init__(self, path, version, entries=None, variants=None,
websiteProperties=True):
"""Initializes the solution.
Args:
path: Path to solution file.
version: Format version to emit.
entries: List of entries in solution. May contain Folder or Project
objects. May be None, if the folder is empty.
variants: List of build variant strings. If none, a default list will
be used.
websiteProperties: Flag to decide if the website properties section
is generated.
"""
self.path = path
self.websiteProperties = websiteProperties
self.version = version
# Copy passed lists (or set to empty lists)
self.entries = list(entries or [])
if variants:
# Copy passed list
self.variants = variants[:]
else:
# Use default
self.variants = ['Debug|Win32', 'Release|Win32']
# TODO(rspangler): Need to be able to handle a mapping of solution config
# to project config. Should we be able to handle variants being a dict,
# or add a separate variant_map variable? If it's a dict, we can't
# guarantee the order of variants since dict keys aren't ordered.
# TODO(rspangler): Automatically write to disk for now; should delay until
# node-evaluation time.
self.Write()
def Write(self, writer=gyp.common.WriteOnDiff):
"""Writes the solution file to disk.
Raises:
IndexError: An entry appears multiple times.
"""
# Walk the entry tree and collect all the folders and projects.
all_entries = set()
entries_to_check = self.entries[:]
while entries_to_check:
e = entries_to_check.pop(0)
# If this entry has been visited, nothing to do.
if e in all_entries:
continue
all_entries.add(e)
# If this is a folder, check its entries too.
if isinstance(e, MSVSFolder):
entries_to_check += e.entries
all_entries = sorted(all_entries)
# Open file and print header
f = writer(self.path)
f.write('Microsoft Visual Studio Solution File, '
'Format Version %s\r\n' % self.version.SolutionVersion())
f.write('# %s\r\n' % self.version.Description())
# Project entries
sln_root = os.path.split(self.path)[0]
for e in all_entries:
relative_path = gyp.common.RelativePath(e.path, sln_root)
# msbuild does not accept an empty folder_name.
# use '.' in case relative_path is empty.
folder_name = relative_path.replace('/', '\\') or '.'
f.write('Project("%s") = "%s", "%s", "%s"\r\n' % (
e.entry_type_guid, # Entry type GUID
e.name, # Folder name
folder_name, # Folder name (again)
e.get_guid(), # Entry GUID
))
# TODO(rspangler): Need a way to configure this stuff
if self.websiteProperties:
f.write('\tProjectSection(WebsiteProperties) = preProject\r\n'
'\t\tDebug.AspNetCompiler.Debug = "True"\r\n'
'\t\tRelease.AspNetCompiler.Debug = "False"\r\n'
'\tEndProjectSection\r\n')
if isinstance(e, MSVSFolder):
if e.items:
f.write('\tProjectSection(SolutionItems) = preProject\r\n')
for i in e.items:
f.write('\t\t%s = %s\r\n' % (i, i))
f.write('\tEndProjectSection\r\n')
if isinstance(e, MSVSProject):
if e.dependencies:
f.write('\tProjectSection(ProjectDependencies) = postProject\r\n')
for d in e.dependencies:
f.write('\t\t%s = %s\r\n' % (d.get_guid(), d.get_guid()))
f.write('\tEndProjectSection\r\n')
f.write('EndProject\r\n')
# Global section
f.write('Global\r\n')
# Configurations (variants)
f.write('\tGlobalSection(SolutionConfigurationPlatforms) = preSolution\r\n')
for v in self.variants:
f.write('\t\t%s = %s\r\n' % (v, v))
f.write('\tEndGlobalSection\r\n')
# Sort config guids for easier diffing of solution changes.
config_guids = []
config_guids_overrides = {}
for e in all_entries:
if isinstance(e, MSVSProject):
config_guids.append(e.get_guid())
config_guids_overrides[e.get_guid()] = e.config_platform_overrides
config_guids.sort()
f.write('\tGlobalSection(ProjectConfigurationPlatforms) = postSolution\r\n')
for g in config_guids:
for v in self.variants:
nv = config_guids_overrides[g].get(v, v)
# Pick which project configuration to build for this solution
# configuration.
f.write('\t\t%s.%s.ActiveCfg = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
# Enable project in this solution configuration.
f.write('\t\t%s.%s.Build.0 = %s\r\n' % (
g, # Project GUID
v, # Solution build configuration
nv, # Project build config for that solution config
))
f.write('\tEndGlobalSection\r\n')
# TODO(rspangler): Should be able to configure this stuff too (though I've
# never seen this be any different)
f.write('\tGlobalSection(SolutionProperties) = preSolution\r\n')
f.write('\t\tHideSolutionNode = FALSE\r\n')
f.write('\tEndGlobalSection\r\n')
# Folder mappings
# Omit this section if there are no folders
if any([e.entries for e in all_entries if isinstance(e, MSVSFolder)]):
f.write('\tGlobalSection(NestedProjects) = preSolution\r\n')
for e in all_entries:
if not isinstance(e, MSVSFolder):
continue # Does not apply to projects, only folders
for subentry in e.entries:
f.write('\t\t%s = %s\r\n' % (subentry.get_guid(), e.get_guid()))
f.write('\tEndGlobalSection\r\n')
f.write('EndGlobal\r\n')
f.close()
| mit |
hkariti/ansible | lib/ansible/modules/network/nxos/nxos_bgp_neighbor_af.py | 5 | 27591 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_bgp_neighbor_af
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages BGP address-family's neighbors configuration.
description:
- Manages BGP address-family's neighbors configurations on NX-OS switches.
author: Gabriele Gerbino (@GGabriele)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- C(state=absent) removes the whole BGP address-family's
neighbor configuration.
  - Setting a property to C(default), where supported, removes that property.
- In order to default maximum-prefix configuration, only
C(max_prefix_limit=default) is needed.
options:
asn:
description:
- BGP autonomous system number. Valid values are String,
Integer in ASPLAIN or ASDOT notation.
required: true
vrf:
description:
- Name of the VRF. The name 'default' is a valid VRF representing
the global bgp.
required: false
default: default
neighbor:
description:
- Neighbor Identifier. Valid values are string. Neighbors may use
IPv4 or IPv6 notation, with or without prefix length.
required: true
afi:
description:
- Address Family Identifier.
required: true
choices: ['ipv4','ipv6', 'vpnv4', 'vpnv6', 'l2vpn']
safi:
description:
- Sub Address Family Identifier.
required: true
choices: ['unicast','multicast', 'evpn']
additional_paths_receive:
description:
- Valid values are enable for basic command enablement; disable
for disabling the command at the neighbor af level
(it adds the disable keyword to the basic command); and inherit
to remove the command at this level (the command value is
inherited from a higher BGP layer).
required: false
choices: ['enable','disable', 'inherit']
default: null
additional_paths_send:
description:
- Valid values are enable for basic command enablement; disable
for disabling the command at the neighbor af level
(it adds the disable keyword to the basic command); and inherit
to remove the command at this level (the command value is
inherited from a higher BGP layer).
required: false
choices: ['enable','disable', 'inherit']
default: null
advertise_map_exist:
description:
- Conditional route advertisement. This property requires two
route maps, an advertise-map and an exist-map. Valid values are
an array specifying both the advertise-map name and the exist-map
name, or simply 'default' e.g. ['my_advertise_map',
'my_exist_map']. This command is mutually exclusive with the
advertise_map_non_exist property.
required: false
default: null
advertise_map_non_exist:
description:
- Conditional route advertisement. This property requires two
route maps, an advertise-map and an exist-map. Valid values are
an array specifying both the advertise-map name and the
non-exist-map name, or simply 'default' e.g.
['my_advertise_map', 'my_non_exist_map']. This command is mutually
exclusive with the advertise_map_exist property.
required: false
default: null
allowas_in:
description:
- Activate allowas-in property
required: false
default: null
allowas_in_max:
description:
- Max-occurrences value for allowas_in. Valid values are
an integer value or 'default'. This is mutually exclusive with
allowas_in.
required: false
default: null
as_override:
description:
- Activate the as-override feature.
required: false
choices: ['true', 'false']
default: null
default_originate:
description:
- Activate the default-originate feature.
required: false
choices: ['true', 'false']
default: null
default_originate_route_map:
description:
- Route-map for the default_originate property.
Valid values are a string defining a route-map name,
or 'default'. This is mutually exclusive with
default_originate.
required: false
default: null
disable_peer_as_check:
description:
- Disable checking of peer AS-number while advertising
required: false
choices: ['true', 'false']
version_added: 2.5
filter_list_in:
description:
- Valid values are a string defining a filter-list name,
or 'default'.
required: false
default: null
filter_list_out:
description:
- Valid values are a string defining a filter-list name,
or 'default'.
required: false
default: null
max_prefix_limit:
description:
- maximum-prefix limit value. Valid values are an integer value
or 'default'.
required: false
default: null
max_prefix_interval:
description:
- Optional restart interval. Valid values are an integer.
Requires max_prefix_limit. May not be combined with max_prefix_warning.
required: false
default: null
max_prefix_threshold:
description:
- Optional threshold percentage at which to generate a warning.
Valid values are an integer value.
Requires max_prefix_limit.
required: false
default: null
max_prefix_warning:
description:
- Optional warning-only keyword. Requires max_prefix_limit. May not be
combined with max_prefix_interval.
required: false
choices: ['true','false']
default: null
next_hop_self:
description:
- Activate the next-hop-self feature.
required: false
choices: ['true','false']
default: null
next_hop_third_party:
description:
- Activate the next-hop-third-party feature.
required: false
choices: ['true','false']
default: null
prefix_list_in:
description:
- Valid values are a string defining a prefix-list name,
or 'default'.
required: false
default: null
prefix_list_out:
description:
- Valid values are a string defining a prefix-list name,
or 'default'.
required: false
default: null
route_map_in:
description:
- Valid values are a string defining a route-map name,
or 'default'.
required: false
default: null
route_map_out:
description:
- Valid values are a string defining a route-map name,
or 'default'.
required: false
default: null
route_reflector_client:
description:
      - Route-reflector client.
required: false
choices: ['true','false']
default: null
send_community:
description:
- send-community attribute.
required: false
choices: ['none', 'both', 'extended', 'standard', 'default']
default: null
soft_reconfiguration_in:
description:
- Valid values are 'enable' for basic command enablement; 'always'
to add the always keyword to the basic command; and 'inherit' to
remove the command at this level (the command value is inherited
from a higher BGP layer).
required: false
choices: ['enable','always','inherit']
default: null
soo:
description:
- Site-of-origin. Valid values are a string defining a VPN
extcommunity or 'default'.
required: false
default: null
suppress_inactive:
description:
- suppress-inactive feature.
required: false
choices: ['true','false','default']
default: null
unsuppress_map:
description:
- unsuppress-map. Valid values are a string defining a route-map
name or 'default'.
required: false
default: null
weight:
description:
- Weight value. Valid values are an integer value or 'default'.
required: false
default: null
state:
description:
- Determines whether the config should be present or not
on the device.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: configure RR client
nxos_bgp_neighbor_af:
asn: 65535
neighbor: '3.3.3.3'
afi: ipv4
safi: unicast
route_reflector_client: true
state: present
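
# Illustrative only (not taken from the upstream docs): the same
# address-family can be removed again by running the task with state=absent.
- name: remove RR client address-family
  nxos_bgp_neighbor_af:
    asn: 65535
    neighbor: '3.3.3.3'
    afi: ipv4
    safi: unicast
    state: absent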
'''
RETURN = '''
commands:
description: commands sent to the device
returned: always
type: list
sample: ["router bgp 65535", "neighbor 3.3.3.3",
"address-family ipv4 unicast", "route-reflector-client"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.common.config import CustomNetworkConfig
BOOL_PARAMS = [
'allowas_in',
'as_override',
'default_originate',
'disable_peer_as_check',
'next_hop_self',
'next_hop_third_party',
'route_reflector_client',
'suppress_inactive'
]
PARAM_TO_COMMAND_KEYMAP = {
'afi': 'address-family',
'asn': 'router bgp',
'neighbor': 'neighbor',
'additional_paths_receive': 'capability additional-paths receive',
'additional_paths_send': 'capability additional-paths send',
'advertise_map_exist': 'advertise-map exist-map',
'advertise_map_non_exist': 'advertise-map non-exist-map',
'allowas_in': 'allowas-in',
'allowas_in_max': 'allowas-in',
'as_override': 'as-override',
'default_originate': 'default-originate',
'default_originate_route_map': 'default-originate route-map',
'disable_peer_as_check': 'disable-peer-as-check',
'filter_list_in': 'filter-list in',
'filter_list_out': 'filter-list out',
'max_prefix_limit': 'maximum-prefix',
'max_prefix_interval': 'maximum-prefix interval',
'max_prefix_threshold': 'maximum-prefix threshold',
'max_prefix_warning': 'maximum-prefix warning',
'next_hop_self': 'next-hop-self',
'next_hop_third_party': 'next-hop-third-party',
'prefix_list_in': 'prefix-list in',
'prefix_list_out': 'prefix-list out',
'route_map_in': 'route-map in',
'route_map_out': 'route-map out',
'route_reflector_client': 'route-reflector-client',
'safi': 'address-family',
'send_community': 'send-community',
'soft_reconfiguration_in': 'soft-reconfiguration inbound',
'soo': 'soo',
'suppress_inactive': 'suppress-inactive',
'unsuppress_map': 'unsuppress-map',
'weight': 'weight',
'vrf': 'vrf'
}
def get_value(arg, config, module):
custom = [
'additional_paths_send',
'additional_paths_receive',
'max_prefix_limit',
'max_prefix_interval',
'max_prefix_threshold',
'max_prefix_warning',
'send_community',
'soft_reconfiguration_in'
]
command = PARAM_TO_COMMAND_KEYMAP[arg]
has_command = re.search(r'^\s+{0}\s*'.format(command), config, re.M)
has_command_val = re.search(r'(?:{0}\s)(?P<value>.*)$'.format(command), config, re.M)
value = ''
if arg in custom:
value = get_custom_value(arg, config, module)
elif arg == 'next_hop_third_party':
has_no_command = re.search(r'^\s+no\s+{0}\s*$'.format(command), config, re.M)
value = False
if not has_no_command:
value = True
elif arg in BOOL_PARAMS:
value = False
if has_command:
value = True
elif command.startswith('advertise-map'):
value = []
has_adv_map = re.search(r'{0}\s(?P<value1>.*)\s{1}\s(?P<value2>.*)$'.format(*command.split()), config, re.M)
if has_adv_map:
value = list(has_adv_map.groups())
elif command.split()[0] in ['filter-list', 'prefix-list', 'route-map']:
has_cmd_direction_val = re.search(r'{0}\s(?P<value>.*)\s{1}$'.format(*command.split()), config, re.M)
if has_cmd_direction_val:
value = has_cmd_direction_val.group('value')
elif has_command_val:
value = has_command_val.group('value')
return value
def get_custom_value(arg, config, module):
command = PARAM_TO_COMMAND_KEYMAP.get(arg)
splitted_config = config.splitlines()
value = ''
command_re = re.compile(r'\s+{0}\s*'.format(command), re.M)
has_command = command_re.search(config)
command_val_re = re.compile(r'(?:{0}\s)(?P<value>.*)$'.format(command), re.M)
has_command_val = command_val_re.search(config)
if arg.startswith('additional_paths'):
value = 'inherit'
for line in splitted_config:
if command in line:
if 'disable' in line:
value = 'disable'
else:
value = 'enable'
elif arg.startswith('max_prefix'):
for line in splitted_config:
if 'maximum-prefix' in line:
splitted_line = line.split()
if arg == 'max_prefix_limit':
value = splitted_line[1]
elif arg == 'max_prefix_interval' and 'restart' in line:
value = splitted_line[-1]
elif arg == 'max_prefix_threshold' and len(splitted_line) > 2:
try:
int(splitted_line[2])
value = splitted_line[2]
except ValueError:
value = ''
elif arg == 'max_prefix_warning':
value = 'warning-only' in line
elif arg == 'soft_reconfiguration_in':
value = 'inherit'
for line in splitted_config:
if command in line:
if 'always' in line:
value = 'always'
else:
value = 'enable'
elif arg == 'send_community':
value = 'none'
for line in splitted_config:
if command in line:
if 'extended' in line:
if value == 'standard':
value = 'both'
else:
value = 'extended'
elif 'both' in line:
value = 'both'
else:
value = 'standard'
return value
def get_existing(module, args, warnings):
existing = {}
netcfg = CustomNetworkConfig(indent=2, contents=get_config(module))
asn_regex = re.compile(r'.*router\sbgp\s(?P<existing_asn>\d+(\.\d+)?).*', re.S)
match_asn = asn_regex.match(str(netcfg))
if match_asn:
existing_asn = match_asn.group('existing_asn')
parents = ["router bgp {0}".format(existing_asn)]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
parents.append('address-family {0} {1}'.format(module.params['afi'], module.params['safi']))
config = netcfg.get_section(parents)
if config:
for arg in args:
if arg not in ['asn', 'vrf', 'neighbor', 'afi', 'safi']:
existing[arg] = get_value(arg, config, module)
existing['asn'] = existing_asn
existing['neighbor'] = module.params['neighbor']
existing['vrf'] = module.params['vrf']
existing['afi'] = module.params['afi']
existing['safi'] = module.params['safi']
else:
warnings.append("The BGP process didn't exist but the task just created it.")
return existing
def apply_key_map(key_map, table):
new_dict = {}
for key in table:
new_key = key_map.get(key)
if new_key:
new_dict[new_key] = table.get(key)
return new_dict
def get_default_command(key, value, existing_commands):
command = ''
if existing_commands.get(key):
existing_value = existing_commands.get(key)
if value == 'inherit':
if existing_value != 'inherit':
command = 'no {0}'.format(key)
else:
if key == 'advertise-map exist-map':
command = 'no advertise-map {0} exist-map {1}'.format(
existing_value[0], existing_value[1])
elif key == 'advertise-map non-exist-map':
command = 'no advertise-map {0} non-exist-map {1}'.format(
existing_value[0], existing_value[1])
elif key == 'filter-list in':
command = 'no filter-list {0} in'.format(existing_value)
elif key == 'filter-list out':
command = 'no filter-list {0} out'.format(existing_value)
elif key == 'prefix-list in':
command = 'no prefix-list {0} in'.format(existing_value)
elif key == 'prefix-list out':
command = 'no prefix-list {0} out'.format(existing_value)
elif key == 'route-map in':
command = 'no route-map {0} in'.format(existing_value)
elif key == 'route-map out':
command = 'no route-map {0} out'.format(existing_value)
elif key.startswith('maximum-prefix'):
command = 'no maximum-prefix'
elif key == 'allowas-in max':
command = ['no allowas-in {0}'.format(existing_value)]
command.append('allowas-in')
else:
command = 'no {0} {1}'.format(key, existing_value)
else:
if key.replace(' ', '_').replace('-', '_') in BOOL_PARAMS:
command = 'no {0}'.format(key)
return command
def fix_proposed(module, existing, proposed):
allowas_in = proposed.get('allowas_in')
allowas_in_max = proposed.get('allowas_in_max')
if allowas_in_max and not allowas_in:
proposed.pop('allowas_in_max')
elif allowas_in and allowas_in_max:
proposed.pop('allowas_in')
if existing.get('send_community') == 'none' and proposed.get('send_community') == 'default':
proposed.pop('send_community')
return proposed
def state_present(module, existing, proposed, candidate):
commands = list()
proposed = fix_proposed(module, existing, proposed)
proposed_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, proposed)
existing_commands = apply_key_map(PARAM_TO_COMMAND_KEYMAP, existing)
for key, value in proposed_commands.items():
if value in ['inherit', 'default']:
command = get_default_command(key, value, existing_commands)
if isinstance(command, str):
if command and command not in commands:
commands.append(command)
elif isinstance(command, list):
for cmd in command:
if cmd not in commands:
commands.append(cmd)
elif key.startswith('maximum-prefix'):
if module.params['max_prefix_limit'] != 'default':
command = 'maximum-prefix {0}'.format(module.params['max_prefix_limit'])
if module.params['max_prefix_threshold']:
command += ' {0}'.format(module.params['max_prefix_threshold'])
if module.params['max_prefix_interval']:
command += ' restart {0}'.format(module.params['max_prefix_interval'])
elif module.params['max_prefix_warning']:
command += ' warning-only'
commands.append(command)
elif value is True:
commands.append(key)
elif value is False:
commands.append('no {0}'.format(key))
elif key == 'address-family':
commands.append("address-family {0} {1}".format(module.params['afi'], module.params['safi']))
elif key.startswith('capability additional-paths'):
command = key
if value == 'disable':
command += ' disable'
commands.append(command)
elif key.startswith('advertise-map'):
direction = key.split()[1]
commands.append('advertise-map {1} {0} {2}'.format(direction, *value))
elif key.split()[0] in ['filter-list', 'prefix-list', 'route-map']:
commands.append('{1} {0} {2}'.format(value, *key.split()))
elif key == 'soft-reconfiguration inbound':
command = ''
if value == 'enable':
command = key
elif value == 'always':
command = '{0} {1}'.format(key, value)
commands.append(command)
elif key == 'send-community':
command = key
if value in ['standard', 'extended']:
commands.append('no ' + key + ' both')
command += ' {0}'.format(value)
commands.append(command)
else:
command = '{0} {1}'.format(key, value)
commands.append(command)
if commands:
parents = ['router bgp {0}'.format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
af_command = 'address-family {0} {1}'.format(
module.params['afi'], module.params['safi'])
parents.append(af_command)
if af_command in commands:
commands.remove(af_command)
candidate.add(commands, parents=parents)
def state_absent(module, existing, candidate):
commands = []
parents = ["router bgp {0}".format(module.params['asn'])]
if module.params['vrf'] != 'default':
parents.append('vrf {0}'.format(module.params['vrf']))
parents.append('neighbor {0}'.format(module.params['neighbor']))
commands.append('no address-family {0} {1}'.format(
module.params['afi'], module.params['safi']))
candidate.add(commands, parents=parents)
def main():
argument_spec = dict(
asn=dict(required=True, type='str'),
vrf=dict(required=False, type='str', default='default'),
neighbor=dict(required=True, type='str'),
afi=dict(required=True, type='str'),
safi=dict(required=True, type='str'),
additional_paths_receive=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
additional_paths_send=dict(required=False, type='str', choices=['enable', 'disable', 'inherit']),
advertise_map_exist=dict(required=False, type='list'),
advertise_map_non_exist=dict(required=False, type='list'),
allowas_in=dict(required=False, type='bool'),
allowas_in_max=dict(required=False, type='str'),
as_override=dict(required=False, type='bool'),
default_originate=dict(required=False, type='bool'),
default_originate_route_map=dict(required=False, type='str'),
disable_peer_as_check=dict(required=False, type='bool'),
filter_list_in=dict(required=False, type='str'),
filter_list_out=dict(required=False, type='str'),
max_prefix_limit=dict(required=False, type='str'),
max_prefix_interval=dict(required=False, type='str'),
max_prefix_threshold=dict(required=False, type='str'),
max_prefix_warning=dict(required=False, type='bool'),
next_hop_self=dict(required=False, type='bool'),
next_hop_third_party=dict(required=False, type='bool'),
prefix_list_in=dict(required=False, type='str'),
prefix_list_out=dict(required=False, type='str'),
route_map_in=dict(required=False, type='str'),
route_map_out=dict(required=False, type='str'),
route_reflector_client=dict(required=False, type='bool'),
send_community=dict(required=False, choices=['none', 'both', 'extended', 'standard', 'default']),
soft_reconfiguration_in=dict(required=False, type='str', choices=['enable', 'always', 'inherit']),
soo=dict(required=False, type='str'),
suppress_inactive=dict(required=False, type='bool'),
unsuppress_map=dict(required=False, type='str'),
weight=dict(required=False, type='str'),
state=dict(choices=['present', 'absent'], default='present', required=False),
)
argument_spec.update(nxos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=[['advertise_map_exist', 'advertise_map_non_exist'],
['max_prefix_interval', 'max_prefix_warning'],
['default_originate', 'default_originate_route_map'],
['allowas_in', 'allowas_in_max']],
supports_check_mode=True,
)
warnings = list()
check_args(module, warnings)
result = dict(changed=False, warnings=warnings)
state = module.params['state']
for key in ['max_prefix_interval', 'max_prefix_warning', 'max_prefix_threshold']:
if module.params[key] and not module.params['max_prefix_limit']:
module.fail_json(
msg='max_prefix_limit is required when using %s' % key
)
if module.params['vrf'] == 'default' and module.params['soo']:
module.fail_json(msg='SOO is only allowed in non-default VRF')
args = PARAM_TO_COMMAND_KEYMAP.keys()
existing = get_existing(module, args, warnings)
if existing.get('asn') and state == 'present':
if existing.get('asn') != module.params['asn']:
module.fail_json(msg='Another BGP ASN already exists.',
proposed_asn=module.params['asn'],
existing_asn=existing.get('asn'))
for param in ['advertise_map_exist', 'advertise_map_non_exist']:
if module.params[param] == ['default']:
module.params[param] = 'default'
proposed_args = dict((k, v) for k, v in module.params.items() if v is not None and k in args)
proposed = {}
for key, value in proposed_args.items():
if key not in ['asn', 'vrf', 'neighbor']:
if not isinstance(value, list):
if str(value).lower() == 'true':
value = True
elif str(value).lower() == 'false':
value = False
elif str(value).lower() == 'default':
if key in BOOL_PARAMS:
value = False
else:
value = 'default'
elif key == 'send_community' and str(value).lower() == 'none':
value = 'default'
if existing.get(key) != value:
proposed[key] = value
candidate = CustomNetworkConfig(indent=3)
if state == 'present':
state_present(module, existing, proposed, candidate)
elif state == 'absent' and existing:
state_absent(module, existing, candidate)
if candidate:
candidate = candidate.items_text()
load_config(module, candidate)
result['changed'] = True
result['commands'] = candidate
else:
result['commands'] = []
module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
dvliman/jaikuengine | .google_appengine/lib/django-1.2/django/contrib/formtools/wizard.py | 44 | 10889 | """
FormWizard class -- implements a multi-page form, validating between each
step and storing the form's state as HTML hidden fields so that no state is
stored on the server side.
"""
import cPickle as pickle
from django import forms
from django.conf import settings
from django.http import Http404
from django.shortcuts import render_to_response
from django.template.context import RequestContext
from django.utils.hashcompat import md5_constructor
from django.utils.translation import ugettext_lazy as _
from django.contrib.formtools.utils import security_hash
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_protect
class FormWizard(object):
# The HTML (and POST data) field name for the "step" variable.
step_field_name="wizard_step"
# METHODS SUBCLASSES SHOULDN'T OVERRIDE ###################################
def __init__(self, form_list, initial=None):
"""
Start a new wizard with a list of forms.
form_list should be a list of Form classes (not instances).
"""
self.form_list = form_list[:]
self.initial = initial or {}
# Dictionary of extra template context variables.
self.extra_context = {}
# A zero-based counter keeping track of which step we're in.
self.step = 0
def __repr__(self):
return "step: %d\nform_list: %s\ninitial_data: %s" % (self.step, self.form_list, self.initial)
def get_form(self, step, data=None):
"Helper method that returns the Form instance for the given step."
return self.form_list[step](data, prefix=self.prefix_for_step(step), initial=self.initial.get(step, None))
def num_steps(self):
"Helper method that returns the number of steps."
# You might think we should just set "self.num_steps = len(form_list)"
# in __init__(), but this calculation needs to be dynamic, because some
# hook methods might alter self.form_list.
return len(self.form_list)
@method_decorator(csrf_protect)
def __call__(self, request, *args, **kwargs):
"""
Main method that does all the hard work, conforming to the Django view
interface.
"""
if 'extra_context' in kwargs:
self.extra_context.update(kwargs['extra_context'])
current_step = self.determine_step(request, *args, **kwargs)
self.parse_params(request, *args, **kwargs)
# Sanity check.
if current_step >= self.num_steps():
raise Http404('Step %s does not exist' % current_step)
# Validate and process all the previous forms before instantiating the
# current step's form in case self.process_step makes changes to
# self.form_list.
# If any of them fails validation, that must mean the validator relied
# on some other input, such as an external Web site.
        # It is also possible that validation might fail under certain attack
# situations: an attacker might be able to bypass previous stages, and
# generate correct security hashes for all the skipped stages by virtue
# of:
# 1) having filled out an identical form which doesn't have the
# validation (and does something different at the end),
# 2) or having filled out a previous version of the same form which
# had some validation missing,
# 3) or previously having filled out the form when they had more
# privileges than they do now.
#
        # Since the hashes only take into account values, and not other
# validation the form might do, we must re-do validation now for
# security reasons.
previous_form_list = []
for i in range(current_step):
f = self.get_form(i, request.POST)
if request.POST.get("hash_%d" % i, '') != self.security_hash(request, f):
return self.render_hash_failure(request, i)
if not f.is_valid():
return self.render_revalidation_failure(request, i, f)
else:
self.process_step(request, f, i)
previous_form_list.append(f)
# Process the current step. If it's valid, go to the next step or call
# done(), depending on whether any steps remain.
if request.method == 'POST':
form = self.get_form(current_step, request.POST)
else:
form = self.get_form(current_step)
if form.is_valid():
self.process_step(request, form, current_step)
next_step = current_step + 1
if next_step == self.num_steps():
return self.done(request, previous_form_list + [form])
else:
form = self.get_form(next_step)
self.step = current_step = next_step
return self.render(form, request, current_step)
def render(self, form, request, step, context=None):
"Renders the given Form object, returning an HttpResponse."
old_data = request.POST
prev_fields = []
if old_data:
hidden = forms.HiddenInput()
# Collect all data from previous steps and render it as HTML hidden fields.
for i in range(step):
old_form = self.get_form(i, old_data)
hash_name = 'hash_%s' % i
prev_fields.extend([bf.as_hidden() for bf in old_form])
prev_fields.append(hidden.render(hash_name, old_data.get(hash_name, self.security_hash(request, old_form))))
return self.render_template(request, form, ''.join(prev_fields), step, context)
# METHODS SUBCLASSES MIGHT OVERRIDE IF APPROPRIATE ########################
def prefix_for_step(self, step):
"Given the step, returns a Form prefix to use."
return str(step)
def render_hash_failure(self, request, step):
"""
Hook for rendering a template if a hash check failed.
step is the step that failed. Any previous step is guaranteed to be
valid.
This default implementation simply renders the form for the given step,
but subclasses may want to display an error message, etc.
"""
return self.render(self.get_form(step), request, step, context={'wizard_error': _('We apologize, but your form has expired. Please continue filling out the form from this page.')})
def render_revalidation_failure(self, request, step, form):
"""
Hook for rendering a template if final revalidation failed.
        It is highly unlikely that this point would ever be reached, but see
        the comment in __call__() for an explanation.
"""
return self.render(form, request, step)
def security_hash(self, request, form):
"""
Calculates the security hash for the given HttpRequest and Form instances.
Subclasses may want to take into account request-specific information,
such as the IP address.
"""
return security_hash(request, form)
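    # An illustrative (hypothetical) override of the hook above that also mixes
    # the client address into the hash; it only uses names already imported in
    # this module:
    #
    #     def security_hash(self, request, form):
    #         return security_hash(request, form) + md5_constructor(
    #             request.META.get('REMOTE_ADDR', '')).hexdigest()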
def determine_step(self, request, *args, **kwargs):
"""
Given the request object and whatever *args and **kwargs were passed to
__call__(), returns the current step (which is zero-based).
Note that the result should not be trusted. It may even be a completely
invalid number. It's not the job of this method to validate it.
"""
if not request.POST:
return 0
try:
step = int(request.POST.get(self.step_field_name, 0))
except ValueError:
return 0
return step
def parse_params(self, request, *args, **kwargs):
"""
        Hook for setting some state, given the request object and whatever
        *args and **kwargs were passed to __call__().
This is called at the beginning of __call__().
"""
pass
def get_template(self, step):
"""
Hook for specifying the name of the template to use for a given step.
Note that this can return a tuple of template names if you'd like to
use the template system's select_template() hook.
"""
return 'forms/wizard.html'
def render_template(self, request, form, previous_fields, step, context=None):
"""
Renders the template for the given step, returning an HttpResponse object.
Override this method if you want to add a custom context, return a
different MIME type, etc. If you only need to override the template
name, use get_template() instead.
The template will be rendered with the following context:
step_field -- The name of the hidden field containing the step.
step0 -- The current step (zero-based).
step -- The current step (one-based).
step_count -- The total number of steps.
form -- The Form instance for the current step (either empty
or with errors).
previous_fields -- A string representing every previous data field,
plus hashes for completed forms, all in the form of
hidden fields. Note that you'll need to run this
through the "safe" template filter, to prevent
auto-escaping, because it's raw HTML.
"""
context = context or {}
context.update(self.extra_context)
return render_to_response(self.get_template(step), dict(context,
step_field=self.step_field_name,
step0=step,
step=step + 1,
step_count=self.num_steps(),
form=form,
previous_fields=previous_fields
), context_instance=RequestContext(request))
def process_step(self, request, form, step):
"""
Hook for modifying the FormWizard's internal state, given a fully
validated Form object. The Form is guaranteed to have clean, valid
data.
This method should *not* modify any of that data. Rather, it might want
to set self.extra_context or dynamically alter self.form_list, based on
previously submitted forms.
Note that this method is called every time a page is rendered for *all*
submitted steps.
"""
pass
# METHODS SUBCLASSES MUST OVERRIDE ########################################
def done(self, request, form_list):
"""
Hook for doing something with the validated data. This is responsible
for the final processing.
form_list is a list of Form instances, each containing clean, valid
data.
"""
raise NotImplementedError("Your %s class has not defined a done() method, which is required." % self.__class__.__name__)
| apache-2.0 |
3dfxmadscientist/CBSS | addons/account/account_invoice.py | 2 | 97945 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from lxml import etree
import openerp.addons.decimal_precision as dp
import openerp.exceptions
from openerp import netsvc
from openerp import pooler
from openerp.osv import fields, osv, orm
from openerp.tools.translate import _
class account_invoice(osv.osv):
def _amount_all(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
res[invoice.id] = {
'amount_untaxed': 0.0,
'amount_tax': 0.0,
'amount_total': 0.0
}
for line in invoice.invoice_line:
res[invoice.id]['amount_untaxed'] += line.price_subtotal
for line in invoice.tax_line:
res[invoice.id]['amount_tax'] += line.amount
res[invoice.id]['amount_total'] = res[invoice.id]['amount_tax'] + res[invoice.id]['amount_untaxed']
return res
def _get_journal(self, cr, uid, context=None):
if context is None:
context = {}
type_inv = context.get('type', 'out_invoice')
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
company_id = context.get('company_id', user.company_id.id)
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale_refund', 'in_refund': 'purchase_refund'}
journal_obj = self.pool.get('account.journal')
domain = [('company_id', '=', company_id)]
if isinstance(type_inv, list):
domain.append(('type', 'in', [type2journal.get(type) for type in type_inv if type2journal.get(type)]))
else:
domain.append(('type', '=', type2journal.get(type_inv, 'sale')))
res = journal_obj.search(cr, uid, domain, limit=1)
return res and res[0] or False
def _get_currency(self, cr, uid, context=None):
res = False
journal_id = self._get_journal(cr, uid, context=context)
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
res = journal.currency and journal.currency.id or journal.company_id.currency_id.id
return res
def _get_journal_analytic(self, cr, uid, type_inv, context=None):
type2journal = {'out_invoice': 'sale', 'in_invoice': 'purchase', 'out_refund': 'sale', 'in_refund': 'purchase'}
tt = type2journal.get(type_inv, 'sale')
result = self.pool.get('account.analytic.journal').search(cr, uid, [('type','=',tt)], context=context)
if not result:
raise osv.except_osv(_('No Analytic Journal!'),_("You must define an analytic journal of type '%s'!") % (tt,))
return result[0]
def _get_type(self, cr, uid, context=None):
if context is None:
context = {}
return context.get('type', 'out_invoice')
def _reconciled(self, cr, uid, ids, name, args, context=None):
res = {}
wf_service = netsvc.LocalService("workflow")
for inv in self.browse(cr, uid, ids, context=context):
res[inv.id] = self.test_paid(cr, uid, [inv.id])
if not res[inv.id] and inv.state == 'paid':
wf_service.trg_validate(uid, 'account.invoice', inv.id, 'open_test', cr)
return res
def _get_reference_type(self, cr, uid, context=None):
return [('none', _('Free Reference'))]
def _amount_residual(self, cr, uid, ids, name, args, context=None):
"""Function of the field residua. It computes the residual amount (balance) for each invoice"""
if context is None:
context = {}
ctx = context.copy()
result = {}
currency_obj = self.pool.get('res.currency')
for invoice in self.browse(cr, uid, ids, context=context):
nb_inv_in_partial_rec = max_invoice_id = 0
result[invoice.id] = 0.0
if invoice.move_id:
for aml in invoice.move_id.line_id:
if aml.account_id.type in ('receivable','payable'):
if aml.currency_id and aml.currency_id.id == invoice.currency_id.id:
result[invoice.id] += aml.amount_residual_currency
else:
ctx['date'] = aml.date
result[invoice.id] += currency_obj.compute(cr, uid, aml.company_id.currency_id.id, invoice.currency_id.id, aml.amount_residual, context=ctx)
if aml.reconcile_partial_id.line_partial_ids:
#we check if the invoice is partially reconciled and if there are other invoices
#involved in this partial reconciliation (and we sum these invoices)
for line in aml.reconcile_partial_id.line_partial_ids:
if line.invoice and invoice.type == line.invoice.type:
nb_inv_in_partial_rec += 1
#store the max invoice id as for this invoice we will make a balance instead of a simple division
max_invoice_id = max(max_invoice_id, line.invoice.id)
if nb_inv_in_partial_rec:
#if there are several invoices in a partial reconciliation, we split the residual by the number
#of invoice to have a sum of residual amounts that matches the partner balance
new_value = currency_obj.round(cr, uid, invoice.currency_id, result[invoice.id] / nb_inv_in_partial_rec)
if invoice.id == max_invoice_id:
#if it's the last the invoice of the bunch of invoices partially reconciled together, we make a
#balance to avoid rounding errors
result[invoice.id] = result[invoice.id] - ((nb_inv_in_partial_rec - 1) * new_value)
else:
result[invoice.id] = new_value
#prevent the residual amount on the invoice to be less than 0
result[invoice.id] = max(result[invoice.id], 0.0)
return result
# Give Journal Items related to the payment reconciled to this invoice
# Return ids of partial and total payments related to the selected invoices
def _get_lines(self, cr, uid, ids, name, arg, context=None):
res = {}
for invoice in self.browse(cr, uid, ids, context=context):
id = invoice.id
res[id] = []
if not invoice.move_id:
continue
data_lines = [x for x in invoice.move_id.line_id if x.account_id.id == invoice.account_id.id]
partial_ids = []
for line in data_lines:
ids_line = []
if line.reconcile_id:
ids_line = line.reconcile_id.line_id
elif line.reconcile_partial_id:
ids_line = line.reconcile_partial_id.line_partial_ids
l = map(lambda x: x.id, ids_line)
partial_ids.append(line.id)
                res[id] = [x for x in l if x != line.id and x not in partial_ids]
return res
def _get_invoice_line(self, cr, uid, ids, context=None):
result = {}
for line in self.pool.get('account.invoice.line').browse(cr, uid, ids, context=context):
result[line.invoice_id.id] = True
return result.keys()
def _get_invoice_tax(self, cr, uid, ids, context=None):
result = {}
for tax in self.pool.get('account.invoice.tax').browse(cr, uid, ids, context=context):
result[tax.invoice_id.id] = True
return result.keys()
def _compute_lines(self, cr, uid, ids, name, args, context=None):
result = {}
for invoice in self.browse(cr, uid, ids, context=context):
src = []
lines = []
if invoice.move_id:
for m in invoice.move_id.line_id:
temp_lines = []
if m.reconcile_id:
temp_lines = map(lambda x: x.id, m.reconcile_id.line_id)
elif m.reconcile_partial_id:
temp_lines = map(lambda x: x.id, m.reconcile_partial_id.line_partial_ids)
lines += [x for x in temp_lines if x not in lines]
src.append(m.id)
lines = filter(lambda x: x not in src, lines)
result[invoice.id] = lines
return result
def _get_invoice_from_line(self, cr, uid, ids, context=None):
move = {}
for line in self.pool.get('account.move.line').browse(cr, uid, ids, context=context):
if line.reconcile_partial_id:
for line2 in line.reconcile_partial_id.line_partial_ids:
move[line2.move_id.id] = True
if line.reconcile_id:
for line2 in line.reconcile_id.line_id:
move[line2.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
def _get_invoice_from_reconcile(self, cr, uid, ids, context=None):
move = {}
for r in self.pool.get('account.move.reconcile').browse(cr, uid, ids, context=context):
for line in r.line_partial_ids:
move[line.move_id.id] = True
for line in r.line_id:
move[line.move_id.id] = True
invoice_ids = []
if move:
invoice_ids = self.pool.get('account.invoice').search(cr, uid, [('move_id','in',move.keys())], context=context)
return invoice_ids
_name = "account.invoice"
_inherit = ['mail.thread']
_description = 'Invoice'
_order = "id desc"
_track = {
'type': {
},
'state': {
'account.mt_invoice_paid': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'paid' and obj['type'] in ('out_invoice', 'out_refund'),
'account.mt_invoice_validated': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'open' and obj['type'] in ('out_invoice', 'out_refund'),
},
}
_columns = {
'name': fields.char('Description', size=64, select=True, readonly=True, states={'draft':[('readonly',False)]}),
'origin': fields.char('Source Document', size=64, help="Reference of the document that produced this invoice.", readonly=True, states={'draft':[('readonly',False)]}),
'supplier_invoice_number': fields.char('Supplier Invoice Number', size=64, help="The reference of this invoice as provided by the supplier.", readonly=True, states={'draft':[('readonly',False)]}),
'type': fields.selection([
('out_invoice','Customer Invoice'),
('in_invoice','Supplier Invoice'),
('out_refund','Customer Refund'),
('in_refund','Supplier Refund'),
],'Type', readonly=True, select=True, change_default=True, track_visibility='always'),
'number': fields.related('move_id','name', type='char', readonly=True, size=64, relation='account.move', store=True, string='Number'),
'internal_number': fields.char('Invoice Number', size=32, readonly=True, help="Unique number of the invoice, computed automatically when the invoice is created."),
'reference': fields.char('Invoice Reference', size=64, help="The partner reference of this invoice."),
'reference_type': fields.selection(_get_reference_type, 'Payment Reference',
required=True, readonly=True, states={'draft':[('readonly',False)]}),
'comment': fields.text('Additional Information'),
'state': fields.selection([
('draft','Draft'),
('proforma','Pro-forma'),
('proforma2','Pro-forma'),
('open','Open'),
('paid','Paid'),
('cancel','Cancelled'),
],'Status', select=True, readonly=True, track_visibility='onchange',
help=' * The \'Draft\' status is used when a user is encoding a new and unconfirmed Invoice. \
            \n* The \'Pro-forma\' status is used when the invoice is pro-forma; it does not yet have an invoice number. \
            \n* The \'Open\' status is used once the user validates the invoice: an invoice number is generated and the invoice stays open until it is paid. \
            \n* The \'Paid\' status is set automatically when the invoice is paid. Its related journal entries may or may not be reconciled. \
            \n* The \'Cancelled\' status is used when the user cancels the invoice.'),
'sent': fields.boolean('Sent', readonly=True, help="It indicates that the invoice has been sent."),
'date_invoice': fields.date('Invoice Date', readonly=True, states={'draft':[('readonly',False)]}, select=True, help="Keep empty to use the current date"),
'date_due': fields.date('Due Date', readonly=True, states={'draft':[('readonly',False)]}, select=True,
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. The payment term may compute several due dates, for example 50% now and 50% in one month, but if you want to force a due date, make sure that the payment term is not set on the invoice. If you keep the payment term and the due date empty, it means direct payment."),
'partner_id': fields.many2one('res.partner', 'Partner', change_default=True, readonly=True, required=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'payment_term': fields.many2one('account.payment.term', 'Payment Terms',readonly=True, states={'draft':[('readonly',False)]},
help="If you use payment terms, the due date will be computed automatically at the generation "\
"of accounting entries. If you keep the payment term and the due date empty, it means direct payment. "\
"The payment term may compute several due dates, for example 50% now, 50% in one month."),
'period_id': fields.many2one('account.period', 'Force Period', domain=[('state','<>','done')], help="Keep empty to use the period of the validation(invoice) date.", readonly=True, states={'draft':[('readonly',False)]}),
'account_id': fields.many2one('account.account', 'Account', required=True, readonly=True, states={'draft':[('readonly',False)]}, help="The partner account used for this invoice."),
'invoice_line': fields.one2many('account.invoice.line', 'invoice_id', 'Invoice Lines', readonly=True, states={'draft':[('readonly',False)]}),
'tax_line': fields.one2many('account.invoice.tax', 'invoice_id', 'Tax Lines', readonly=True, states={'draft':[('readonly',False)]}),
'move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, select=1, ondelete='restrict', help="Link to the automatically generated Journal Items."),
'amount_untaxed': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Subtotal', track_visibility='always',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_tax': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Tax',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'amount_total': fields.function(_amount_all, digits_compute=dp.get_precision('Account'), string='Total',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line'], 20),
'account.invoice.tax': (_get_invoice_tax, None, 20),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 20),
},
multi='all'),
'currency_id': fields.many2one('res.currency', 'Currency', required=True, readonly=True, states={'draft':[('readonly',False)]}, track_visibility='always'),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, readonly=True, states={'draft':[('readonly',False)]},
domain="[('type', 'in', {'out_invoice': ['sale'], 'out_refund': ['sale_refund'], 'in_refund': ['purchase_refund'], 'in_invoice': ['purchase']}.get(type, [])), ('company_id', '=', company_id)]"),
'company_id': fields.many2one('res.company', 'Company', required=True, change_default=True, readonly=True, states={'draft':[('readonly',False)]}),
'check_total': fields.float('Verification Total', digits_compute=dp.get_precision('Account'), readonly=True, states={'draft':[('readonly',False)]}),
'reconciled': fields.function(_reconciled, string='Paid/Reconciled', type='boolean',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, None, 50), # Check if we can remove ?
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
}, help="It indicates that the invoice has been paid and the journal entry of the invoice has been reconciled with one or several journal entries of payment."),
'partner_bank_id': fields.many2one('res.partner.bank', 'Bank Account',
help='Bank Account Number to which the invoice will be paid. A Company bank account if this is a Customer Invoice or Supplier Refund, otherwise a Partner bank account number.', readonly=True, states={'draft':[('readonly',False)]}),
'move_lines':fields.function(_get_lines, type='many2many', relation='account.move.line', string='Entry Lines'),
'residual': fields.function(_amount_residual, digits_compute=dp.get_precision('Account'), string='Balance',
store={
'account.invoice': (lambda self, cr, uid, ids, c={}: ids, ['invoice_line','move_id'], 50),
'account.invoice.tax': (_get_invoice_tax, None, 50),
'account.invoice.line': (_get_invoice_line, ['price_unit','invoice_line_tax_id','quantity','discount','invoice_id'], 50),
'account.move.line': (_get_invoice_from_line, None, 50),
'account.move.reconcile': (_get_invoice_from_reconcile, None, 50),
},
help="Remaining amount due."),
'payment_ids': fields.function(_compute_lines, relation='account.move.line', type="many2many", string='Payments'),
'move_name': fields.char('Journal Entry', size=64, readonly=True, states={'draft':[('readonly',False)]}),
'user_id': fields.many2one('res.users', 'Salesperson', readonly=True, track_visibility='onchange', states={'draft':[('readonly',False)]}),
'fiscal_position': fields.many2one('account.fiscal.position', 'Fiscal Position', readonly=True, states={'draft':[('readonly',False)]})
}
_defaults = {
'type': _get_type,
'state': 'draft',
'journal_id': _get_journal,
'currency_id': _get_currency,
'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'account.invoice', context=c),
'reference_type': 'none',
'check_total': 0.0,
'internal_number': False,
'user_id': lambda s, cr, u, c: u,
'sent': False,
}
_sql_constraints = [
('number_uniq', 'unique(number, company_id, journal_id, type)', 'Invoice Number must be unique per Company!'),
]
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
journal_obj = self.pool.get('account.journal')
if context is None:
context = {}
if context.get('active_model', '') in ['res.partner'] and context.get('active_ids', False) and context['active_ids']:
partner = self.pool.get(context['active_model']).read(cr, uid, context['active_ids'], ['supplier','customer'])[0]
if not view_type:
view_id = self.pool.get('ir.ui.view').search(cr, uid, [('name', '=', 'account.invoice.tree')])
view_type = 'tree'
if view_type == 'form':
if partner['supplier'] and not partner['customer']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.supplier.form')])
elif partner['customer'] and not partner['supplier']:
view_id = self.pool.get('ir.ui.view').search(cr,uid,[('name', '=', 'account.invoice.form')])
if view_id and isinstance(view_id, (list, tuple)):
view_id = view_id[0]
res = super(account_invoice,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
type = context.get('journal_type', False)
for field in res['fields']:
if field == 'journal_id' and type:
journal_select = journal_obj._name_search(cr, uid, '', [('type', '=', type)], context=context, limit=None, name_get_uid=1)
res['fields'][field]['selection'] = journal_select
doc = etree.XML(res['arch'])
if context.get('type', False):
for node in doc.xpath("//field[@name='partner_bank_id']"):
if context['type'] == 'in_refund':
node.set('domain', "[('partner_id.ref_companies', 'in', [company_id])]")
elif context['type'] == 'out_refund':
node.set('domain', "[('partner_id', '=', partner_id)]")
res['arch'] = etree.tostring(doc)
if view_type == 'search':
if context.get('type', 'in_invoice') in ('out_invoice', 'out_refund'):
for node in doc.xpath("//group[@name='extended filter']"):
doc.remove(node)
res['arch'] = etree.tostring(doc)
if view_type == 'tree':
partner_string = _('Customer')
if context.get('type', 'out_invoice') in ('in_invoice', 'in_refund'):
partner_string = _('Supplier')
for node in doc.xpath("//field[@name='reference']"):
node.set('invisible', '0')
for node in doc.xpath("//field[@name='partner_id']"):
node.set('string', partner_string)
res['arch'] = etree.tostring(doc)
return res
def get_log_context(self, cr, uid, context=None):
if context is None:
context = {}
res = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'invoice_form')
view_id = res and res[1] or False
context['view_id'] = view_id
return context
def invoice_print(self, cr, uid, ids, context=None):
'''
This function prints the invoice and mark it as sent, so that we can see more easily the next step of the workflow
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
self.write(cr, uid, ids, {'sent': True}, context=context)
datas = {
'ids': ids,
'model': 'account.invoice',
'form': self.read(cr, uid, ids[0], context=context)
}
return {
'type': 'ir.actions.report.xml',
'report_name': 'account.invoice',
'datas': datas,
'nodestroy' : True
}
def action_invoice_sent(self, cr, uid, ids, context=None):
'''
This function opens a window to compose an email, with the edi invoice template message loaded by default
'''
assert len(ids) == 1, 'This option should only be used for a single id at a time.'
ir_model_data = self.pool.get('ir.model.data')
try:
template_id = ir_model_data.get_object_reference(cr, uid, 'account', 'email_template_edi_invoice')[1]
except ValueError:
template_id = False
try:
compose_form_id = ir_model_data.get_object_reference(cr, uid, 'mail', 'email_compose_message_wizard_form')[1]
except ValueError:
compose_form_id = False
ctx = dict(context)
ctx.update({
'default_model': 'account.invoice',
'default_res_id': ids[0],
'default_use_template': bool(template_id),
'default_template_id': template_id,
'default_composition_mode': 'comment',
'mark_invoice_as_sent': True,
})
return {
'type': 'ir.actions.act_window',
'view_type': 'form',
'view_mode': 'form',
'res_model': 'mail.compose.message',
'views': [(compose_form_id, 'form')],
'view_id': compose_form_id,
'target': 'new',
'context': ctx,
}
def confirm_paid(self, cr, uid, ids, context=None):
if context is None:
context = {}
self.write(cr, uid, ids, {'state':'paid'}, context=context)
return True
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
invoices = self.read(cr, uid, ids, ['state','internal_number'], context=context)
unlink_ids = []
for t in invoices:
if t['state'] not in ('draft', 'cancel'):
raise openerp.exceptions.Warning(_('You cannot delete an invoice which is not draft or cancelled. You should refund it instead.'))
elif t['internal_number']:
raise openerp.exceptions.Warning(_('You cannot delete an invoice after it has been validated (and received a number). You can set it back to "Draft" state and modify its content, then re-confirm it.'))
else:
unlink_ids.append(t['id'])
osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
return True
def onchange_partner_id(self, cr, uid, ids, type, partner_id,\
date_invoice=False, payment_term=False, partner_bank_id=False, company_id=False):
partner_payment_term = False
acc_id = False
bank_id = False
fiscal_position = False
opt = [('uid', str(uid))]
if partner_id:
opt.insert(0, ('id', partner_id))
p = self.pool.get('res.partner').browse(cr, uid, partner_id)
if company_id:
if (p.property_account_receivable.company_id and (p.property_account_receivable.company_id.id != company_id)) and (p.property_account_payable.company_id and (p.property_account_payable.company_id.id != company_id)):
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('res_id','=','res.partner,'+str(partner_id)+''),('company_id','=',company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr,uid,[('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr,uid,[('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr,uid,rec_pro_id,['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr,uid,pay_pro_id,['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts for this company, you should create one.'))
account_obj = self.pool.get('account.account')
rec_obj_acc = account_obj.browse(cr, uid, [rec_res_id])
pay_obj_acc = account_obj.browse(cr, uid, [pay_res_id])
p.property_account_receivable = rec_obj_acc[0]
p.property_account_payable = pay_obj_acc[0]
if type in ('out_invoice', 'out_refund'):
acc_id = p.property_account_receivable.id
partner_payment_term = p.property_payment_term and p.property_payment_term.id or False
else:
acc_id = p.property_account_payable.id
partner_payment_term = p.property_supplier_payment_term and p.property_supplier_payment_term.id or False
fiscal_position = p.property_account_position and p.property_account_position.id or False
if p.commercial_partner_id.bank_ids:
bank_id = p.commercial_partner_id.bank_ids[0].id
result = {'value': {
'account_id': acc_id,
'payment_term': partner_payment_term,
'fiscal_position': fiscal_position
}
}
if type in ('in_invoice', 'in_refund'):
result['value']['partner_bank_id'] = bank_id
if payment_term != partner_payment_term:
if partner_payment_term:
to_update = self.onchange_payment_term_date_invoice(
cr, uid, ids, partner_payment_term, date_invoice)
result['value'].update(to_update['value'])
else:
result['value']['date_due'] = False
if partner_bank_id != bank_id:
to_update = self.onchange_partner_bank(cr, uid, ids, bank_id)
result['value'].update(to_update['value'])
return result
def onchange_journal_id(self, cr, uid, ids, journal_id=False, context=None):
result = {}
if journal_id:
journal = self.pool.get('account.journal').browse(cr, uid, journal_id, context=context)
currency_id = journal.currency and journal.currency.id or journal.company_id.currency_id.id
company_id = journal.company_id.id
result = {'value': {
'currency_id': currency_id,
'company_id': company_id,
}
}
return result
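# Recompute the due date from the payment term: the latest date returned by
# account.payment.term.compute() becomes date_due; without a payment term the
# due date entered by the user (or the invoice date) is kept.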
def onchange_payment_term_date_invoice(self, cr, uid, ids, payment_term_id, date_invoice):
res = {}
if isinstance(ids, (int, long)):
ids = [ids]
if not date_invoice:
date_invoice = time.strftime('%Y-%m-%d')
if not payment_term_id:
inv = self.browse(cr, uid, ids[0])
#To make sure the due date keeps the value entered by the user when there is no payment term defined
return {'value':{'date_due': inv.date_due and inv.date_due or date_invoice}}
pterm_list = self.pool.get('account.payment.term').compute(cr, uid, payment_term_id, value=1, date_ref=date_invoice)
if pterm_list:
pterm_list = [line[0] for line in pterm_list]
pterm_list.sort()
res = {'value':{'date_due': pterm_list[-1]}}
else:
raise osv.except_osv(_('Insufficient Data!'), _('The payment term of the supplier does not have a payment term line.'))
return res
def onchange_invoice_line(self, cr, uid, ids, lines):
return {}
def onchange_partner_bank(self, cursor, user, ids, partner_bank_id=False):
return {'value': {}}
def onchange_company_id(self, cr, uid, ids, company_id, part_id, type, invoice_line, currency_id):
#TODO: add the missing context parameter when forward-porting in trunk so we can remove
# this hack!
context = self.pool['res.users'].context_get(cr, uid)
val = {}
dom = {}
obj_journal = self.pool.get('account.journal')
account_obj = self.pool.get('account.account')
inv_line_obj = self.pool.get('account.invoice.line')
if company_id and part_id and type:
acc_id = False
partner_obj = self.pool.get('res.partner').browse(cr,uid,part_id)
if partner_obj.property_account_payable and partner_obj.property_account_receivable:
if partner_obj.property_account_payable.company_id.id != company_id and partner_obj.property_account_receivable.company_id.id != company_id:
property_obj = self.pool.get('ir.property')
rec_pro_id = property_obj.search(cr, uid, [('name', '=', 'property_account_receivable'), ('res_id', '=', 'res.partner,' + str(part_id)), ('company_id', '=', company_id)])
pay_pro_id = property_obj.search(cr, uid, [('name', '=', 'property_account_payable'), ('res_id', '=', 'res.partner,' + str(part_id)), ('company_id', '=', company_id)])
if not rec_pro_id:
rec_pro_id = property_obj.search(cr, uid, [('name','=','property_account_receivable'),('company_id','=',company_id)])
if not pay_pro_id:
pay_pro_id = property_obj.search(cr, uid, [('name','=','property_account_payable'),('company_id','=',company_id)])
rec_line_data = property_obj.read(cr, uid, rec_pro_id, ['name','value_reference','res_id'])
pay_line_data = property_obj.read(cr, uid, pay_pro_id, ['name','value_reference','res_id'])
rec_res_id = rec_line_data and rec_line_data[0].get('value_reference',False) and int(rec_line_data[0]['value_reference'].split(',')[1]) or False
pay_res_id = pay_line_data and pay_line_data[0].get('value_reference',False) and int(pay_line_data[0]['value_reference'].split(',')[1]) or False
if not rec_res_id and not pay_res_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts, you should create one from the Settings\Configuration\Accounting menu.'))
if type in ('out_invoice', 'out_refund'):
acc_id = rec_res_id
else:
acc_id = pay_res_id
val= {'account_id': acc_id}
if ids:
if company_id:
inv_obj = self.browse(cr,uid,ids)
for line in inv_obj[0].invoice_line:
if line.account_id:
if line.account_id.company_id.id != company_id:
result_id = account_obj.search(cr, uid, [('name','=',line.account_id.name),('company_id','=',company_id)])
if not result_id:
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find a chart of accounts, you should create one from the Settings\Configuration\Accounting menu.'))
inv_line_obj.write(cr, uid, [line.id], {'account_id': result_id[-1]})
else:
if invoice_line:
for inv_line in invoice_line:
obj_l = account_obj.browse(cr, uid, inv_line[2]['account_id'])
if obj_l.company_id.id != company_id:
raise osv.except_osv(_('Configuration Error!'),
_('The invoice line account\'s company and the invoice\'s company do not match.'))
else:
continue
if company_id and type:
journal_mapping = {
'out_invoice': 'sale',
'out_refund': 'sale_refund',
'in_refund': 'purchase_refund',
'in_invoice': 'purchase',
}
journal_type = journal_mapping[type]
journal_ids = obj_journal.search(cr, uid, [('company_id','=',company_id), ('type', '=', journal_type)])
if journal_ids:
val['journal_id'] = journal_ids[0]
ir_values_obj = self.pool.get('ir.values')
res_journal_default = ir_values_obj.get(cr, uid, 'default', 'type=%s' % (type), ['account.invoice'])
for r in res_journal_default:
if r[1] == 'journal_id' and r[2] in journal_ids:
val['journal_id'] = r[2]
if not val.get('journal_id', False):
journal_type_map = dict(obj_journal._columns['type'].selection)
journal_type_label = self.pool['ir.translation']._get_source(cr, uid, None, ('code','selection'),
context.get('lang'),
journal_type_map.get(journal_type))
raise osv.except_osv(_('Configuration Error!'),
_('Cannot find any account journal of %s type for this company.\n\nYou can create one in the menu: \nConfiguration\Journals\Journals.') % ('"%s"' % journal_type_label))
dom = {'journal_id': [('id', 'in', journal_ids)]}
else:
journal_ids = obj_journal.search(cr, uid, [])
return {'value': val, 'domain': dom}
# go from canceled state to draft state
def action_cancel_draft(self, cr, uid, ids, *args):
self.write(cr, uid, ids, {'state':'draft'})
wf_service = netsvc.LocalService("workflow")
for inv_id in ids:
wf_service.trg_delete(uid, 'account.invoice', inv_id, cr)
wf_service.trg_create(uid, 'account.invoice', inv_id, cr)
return True
# Workflow stuff
#################
# return the ids of the move lines which have the same account as the invoice
# whose id is in ids
def move_line_id_payment_get(self, cr, uid, ids, *args):
if not ids: return []
result = self.move_line_id_payment_gets(cr, uid, ids, *args)
return result.get(ids[0], [])
def move_line_id_payment_gets(self, cr, uid, ids, *args):
res = {}
if not ids: return res
cr.execute('SELECT i.id, l.id '\
'FROM account_move_line l '\
'LEFT JOIN account_invoice i ON (i.move_id=l.move_id) '\
'WHERE i.id IN %s '\
'AND l.account_id=i.account_id',
(tuple(ids),))
for r in cr.fetchall():
res.setdefault(r[0], [])
res[r[0]].append( r[1] )
return res
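# Duplicating an invoice must not carry over numbering or accounting entries:
# reset state, number, move, period and the 'sent' flag on the copy.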
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({
'state':'draft',
'number':False,
'move_id':False,
'move_name':False,
'internal_number': False,
'period_id': False,
'sent': False,
})
if 'date_invoice' not in default:
default.update({
'date_invoice':False
})
if 'date_due' not in default:
default.update({
'date_due':False
})
return super(account_invoice, self).copy(cr, uid, id, default, context)
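# Workflow test: the invoice is considered paid when the receivable/payable
# move lines of its account move are all reconciled.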
def test_paid(self, cr, uid, ids, *args):
res = self.move_line_id_payment_get(cr, uid, ids)
if not res:
return False
ok = True
for id in res:
cr.execute('select reconcile_id from account_move_line where id=%s', (id,))
ok = ok and bool(cr.fetchone()[0])
return ok
def button_reset_taxes(self, cr, uid, ids, context=None):
if context is None:
context = {}
ctx = context.copy()
ait_obj = self.pool.get('account.invoice.tax')
for id in ids:
cr.execute("DELETE FROM account_invoice_tax WHERE invoice_id=%s AND manual is False", (id,))
partner = self.browse(cr, uid, id, context=ctx).partner_id
if partner.lang:
ctx.update({'lang': partner.lang})
for taxe in ait_obj.compute(cr, uid, id, context=ctx).values():
ait_obj.create(cr, uid, taxe)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {'invoice_line':[]}, context=ctx)
return True
def button_compute(self, cr, uid, ids, context=None, set_total=False):
self.button_reset_taxes(cr, uid, ids, context)
for inv in self.browse(cr, uid, ids, context=context):
if set_total:
self.pool.get('account.invoice').write(cr, uid, [inv.id], {'check_total': inv.amount_total})
return True
def _convert_ref(self, cr, uid, ref):
return (ref or '').replace('/','')
def _get_analytic_lines(self, cr, uid, id, context=None):
if context is None:
context = {}
inv = self.browse(cr, uid, id)
cur_obj = self.pool.get('res.currency')
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
if inv.type in ('out_invoice', 'in_refund'):
sign = 1
else:
sign = -1
iml = self.pool.get('account.invoice.line').move_line_get(cr, uid, inv.id, context=context)
for il in iml:
if il['account_analytic_id']:
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
else:
ref = self._convert_ref(cr, uid, inv.number)
if not inv.journal_id.analytic_journal_id:
raise osv.except_osv(_('No Analytic Journal!'),_("You have to define an analytic journal on the '%s' journal!") % (inv.journal_id.name,))
il['analytic_lines'] = [(0,0, {
'name': il['name'],
'date': inv['date_invoice'],
'account_id': il['account_analytic_id'],
'unit_amount': il['quantity'],
'amount': cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, il['price'], context={'date': inv.date_invoice}) * sign,
'product_id': il['product_id'],
'product_uom_id': il['uos_id'],
'general_account_id': il['account_id'],
'journal_id': inv.journal_id.analytic_journal_id.id,
'ref': ref,
})]
return iml
def action_date_assign(self, cr, uid, ids, *args):
for inv in self.browse(cr, uid, ids):
res = self.onchange_payment_term_date_invoice(cr, uid, inv.id, inv.payment_term.id, inv.date_invoice)
if res and res['value']:
self.write(cr, uid, [inv.id], res['value'])
return True
def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
"""finalize_invoice_move_lines(cr, uid, invoice, move_lines) -> move_lines
Hook method to be overridden in additional modules to verify and possibly alter the
move lines to be created by an invoice, for special cases.
:param invoice_browse: browsable record of the invoice that is generating the move lines
:param move_lines: list of dictionaries with the account.move.lines (as for create())
:return: the (possibly updated) final move_lines to create for this invoice
"""
return move_lines
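# A minimal sketch (hypothetical add-on module) of overriding the
# finalize_invoice_move_lines() hook above, assuming the (0, 0, vals) command
# tuples built by action_move_create():
#
#     class account_invoice(osv.osv):
#         _inherit = 'account.invoice'
#
#         def finalize_invoice_move_lines(self, cr, uid, invoice_browse, move_lines):
#             move_lines = super(account_invoice, self).finalize_invoice_move_lines(
#                 cr, uid, invoice_browse, move_lines)
#             for _command, _res_id, vals in move_lines:
#                 # e.g. give every generated line a fallback narration
#                 vals['name'] = vals.get('name') or invoice_browse.name or '/'
#             return move_lines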
def check_tax_lines(self, cr, uid, inv, compute_taxes, ait_obj):
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id
if not inv.tax_line:
for tax in compute_taxes.values():
ait_obj.create(cr, uid, tax)
else:
tax_key = []
for tax in inv.tax_line:
if tax.manual:
continue
key = (tax.tax_code_id.id, tax.base_code_id.id, tax.account_id.id, tax.account_analytic_id.id)
tax_key.append(key)
if not key in compute_taxes:
raise osv.except_osv(_('Warning!'), _('Global taxes defined, but they are not in invoice lines !'))
base = compute_taxes[key]['base']
if abs(base - tax.base) > company_currency.rounding:
raise osv.except_osv(_('Warning!'), _('Tax base different!\nClick on compute to update the tax base.'))
for key in compute_taxes:
if not key in tax_key:
raise osv.except_osv(_('Warning!'), _('Taxes are missing!\nClick on compute button.'))
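# Convert each move line amount to the company currency (keeping the original
# amount in amount_currency) and accumulate the signed totals; for customer
# invoices and supplier refunds the line price is negated.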
def compute_invoice_totals(self, cr, uid, inv, company_currency, ref, invoice_move_lines, context=None):
if context is None:
context={}
total = 0
total_currency = 0
cur_obj = self.pool.get('res.currency')
for i in invoice_move_lines:
if inv.currency_id.id != company_currency:
context.update({'date': inv.date_invoice or time.strftime('%Y-%m-%d')})
i['currency_id'] = inv.currency_id.id
i['amount_currency'] = i['price']
i['price'] = cur_obj.compute(cr, uid, inv.currency_id.id,
company_currency, i['price'],
context=context)
else:
i['amount_currency'] = False
i['currency_id'] = False
i['ref'] = ref
if inv.type in ('out_invoice','in_refund'):
total += i['price']
total_currency += i['amount_currency'] or i['price']
i['price'] = - i['price']
else:
total -= i['price']
total_currency -= i['amount_currency'] or i['price']
return total, total_currency, invoice_move_lines
def inv_line_characteristic_hashcode(self, invoice, invoice_line):
"""Overridable hashcode generation for invoice lines. Lines having the same hashcode
will be grouped together if the journal has the 'group line' option. Of course a module
can add fields to invoice lines that would need to be tested too before merging lines
or not."""
return "%s-%s-%s-%s-%s"%(
invoice_line['account_id'],
invoice_line.get('tax_code_id',"False"),
invoice_line.get('product_id',"False"),
invoice_line.get('analytic_account_id',"False"),
invoice_line.get('date_maturity',"False"))
def group_lines(self, cr, uid, iml, line, inv):
"""Merge account move lines (and hence analytic lines) if invoice line hashcodes are equals"""
if inv.journal_id.group_invoice_lines:
line2 = {}
for x, y, l in line:
tmp = self.inv_line_characteristic_hashcode(inv, l)
if tmp in line2:
am = line2[tmp]['debit'] - line2[tmp]['credit'] + (l['debit'] - l['credit'])
line2[tmp]['debit'] = (am > 0) and am or 0.0
line2[tmp]['credit'] = (am < 0) and -am or 0.0
line2[tmp]['tax_amount'] += l['tax_amount']
line2[tmp]['analytic_lines'] += l['analytic_lines']
else:
line2[tmp] = l
line = []
for key, val in line2.items():
line.append((0,0,val))
return line
def action_move_create(self, cr, uid, ids, context=None):
"""Creates invoice related analytics and financial move lines"""
ait_obj = self.pool.get('account.invoice.tax')
cur_obj = self.pool.get('res.currency')
period_obj = self.pool.get('account.period')
payment_term_obj = self.pool.get('account.payment.term')
journal_obj = self.pool.get('account.journal')
move_obj = self.pool.get('account.move')
if context is None:
context = {}
for inv in self.browse(cr, uid, ids, context=context):
if not inv.journal_id.sequence_id:
raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))
if not inv.invoice_line:
raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))
if inv.move_id:
continue
ctx = context.copy()
ctx.update({'lang': inv.partner_id.lang})
if not inv.date_invoice:
self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
# create the analytical lines
# one move line per invoice line
iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)
# check if taxes are all computed
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
# check_total is only enforced for users belonging to the group_supplier_inv_check_total group
group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]
group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)
if group_check_total and uid in [x.id for x in group_check_total.users]:
if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):
raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\nThe encoded total does not match the computed total.'))
if inv.payment_term:
total_fixed = total_percent = 0
for line in inv.payment_term.line_ids:
if line.value == 'fixed':
total_fixed += line.value_amount
if line.value == 'procent':
total_percent += line.value_amount
total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)
if (total_fixed + total_percent) > 100:
raise osv.except_osv(_('Error!'), _("Cannot create the invoice.\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'."))
# one move line per tax line
iml += ait_obj.move_line_get(cr, uid, inv.id)
entry_type = ''
if inv.type in ('in_invoice', 'in_refund'):
ref = inv.reference
entry_type = 'journal_pur_voucher'
if inv.type == 'in_refund':
entry_type = 'cont_voucher'
else:
ref = self._convert_ref(cr, uid, inv.number)
entry_type = 'journal_sale_vou'
if inv.type == 'out_refund':
entry_type = 'cont_voucher'
diff_currency_p = inv.currency_id.id != company_currency
# create one move line for the total and possibly adjust the other lines amount
total = 0
total_currency = 0
total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)
acc_id = inv.account_id.id
name = inv['name'] or inv['supplier_invoice_number'] or '/'
totlines = False
if inv.payment_term:
totlines = payment_term_obj.compute(cr,
uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)
if totlines:
res_amount_currency = total_currency
i = 0
ctx.update({'date': inv.date_invoice})
for t in totlines:
if inv.currency_id.id != company_currency:
amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)
else:
amount_currency = False
# last line add the diff
res_amount_currency -= amount_currency or 0
i += 1
if i == len(totlines):
amount_currency += res_amount_currency
iml.append({
'type': 'dest',
'name': name,
'price': t[1],
'account_id': acc_id,
'date_maturity': t[0],
'amount_currency': diff_currency_p \
and amount_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref,
})
else:
iml.append({
'type': 'dest',
'name': name,
'price': total,
'account_id': acc_id,
'date_maturity': inv.date_due or False,
'amount_currency': diff_currency_p \
and total_currency or False,
'currency_id': diff_currency_p \
and inv.currency_id.id or False,
'ref': ref
})
date = inv.date_invoice or time.strftime('%Y-%m-%d')
part = self.pool.get("res.partner")._find_accounting_partner(inv.partner_id)
line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)
line = self.group_lines(cr, uid, iml, line, inv)
journal_id = inv.journal_id.id
journal = journal_obj.browse(cr, uid, journal_id, context=ctx)
if journal.centralisation:
raise osv.except_osv(_('User Error!'),
_('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))
line = self.finalize_invoice_move_lines(cr, uid, inv, line)
move = {
'ref': inv.reference or inv.supplier_invoice_number or inv.name,
'line_id': line,
'journal_id': journal_id,
'date': date,
'narration': inv.comment,
'company_id': inv.company_id.id,
}
period_id = inv.period_id and inv.period_id.id or False
ctx.update(company_id=inv.company_id.id,
account_period_prefer_normal=True)
if not period_id:
period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)
period_id = period_ids and period_ids[0] or False
if period_id:
move['period_id'] = period_id
for i in line:
i[2]['period_id'] = period_id
ctx.update(invoice=inv)
move_id = move_obj.create(cr, uid, move, context=ctx)
new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name
# make the invoice point to that move
self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)
# Pass invoice in context in method post: used if you want to get the same
# account move reference when creating the same invoice after a cancelled one:
move_obj.post(cr, uid, [move_id], context=ctx)
self._log_event(cr, uid, ids)
return True
def invoice_validate(self, cr, uid, ids, context=None):
self.write(cr, uid, ids, {'state':'open'}, context=context)
return True
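# Map one internal move-line dict (as built by move_line_get / move_line_get_item)
# to the values expected by account.move.line create(): positive prices become
# debits, negative prices become credits.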
def line_get_convert(self, cr, uid, x, part, date, context=None):
return {
'date_maturity': x.get('date_maturity', False),
'partner_id': part,
'name': x['name'][:64],
'date': date,
'debit': x['price']>0 and x['price'],
'credit': x['price']<0 and -x['price'],
'account_id': x['account_id'],
'analytic_lines': x.get('analytic_lines', []),
'amount_currency': x['price']>0 and abs(x.get('amount_currency', False)) or -abs(x.get('amount_currency', False)),
'currency_id': x.get('currency_id', False),
'tax_code_id': x.get('tax_code_id', False),
'tax_amount': x.get('tax_amount', False),
'ref': x.get('ref', False),
'quantity': x.get('quantity',1.00),
'product_id': x.get('product_id', False),
'product_uom_id': x.get('uos_id', False),
'analytic_account_id': x.get('account_analytic_id', False),
}
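# Store the number assigned to the invoice as internal_number and propagate the
# reference to the account move, its move lines and the related analytic lines.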
def action_number(self, cr, uid, ids, context=None):
if context is None:
context = {}
#TODO: not a correct fix, but fresh values are required before reading them.
self.write(cr, uid, ids, {})
for obj_inv in self.browse(cr, uid, ids, context=context):
invtype = obj_inv.type
number = obj_inv.number
move_id = obj_inv.move_id and obj_inv.move_id.id or False
reference = obj_inv.reference or ''
self.write(cr, uid, ids, {'internal_number': number})
if invtype in ('in_invoice', 'in_refund'):
if not reference:
ref = self._convert_ref(cr, uid, number)
else:
ref = reference
else:
ref = self._convert_ref(cr, uid, number)
cr.execute('UPDATE account_move SET ref=%s ' \
'WHERE id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_move_line SET ref=%s ' \
'WHERE move_id=%s AND (ref is null OR ref = \'\')',
(ref, move_id))
cr.execute('UPDATE account_analytic_line SET ref=%s ' \
'FROM account_move_line ' \
'WHERE account_move_line.move_id = %s ' \
'AND account_analytic_line.move_id = account_move_line.id',
(ref, move_id))
return True
def action_proforma(self, cr, uid, ids, context=None):
"""
Check if all taxes are present with the correct base amount
on creating a proforma invoice. This leaves room for manual
corrections of the tax amount.
"""
if not ids:
return True
if isinstance(ids, (int, long)):
ids = [ids]
ait_obj = self.pool.get('account.invoice.tax')
for inv in self.browse(cr, uid, ids, context=context):
compute_taxes = ait_obj.compute(cr, uid, inv.id, context=context)
self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)
return self.write(
cr, uid, ids, {'state': 'proforma2'}, context=context)
def action_cancel(self, cr, uid, ids, context=None):
if context is None:
context = {}
account_move_obj = self.pool.get('account.move')
invoices = self.read(cr, uid, ids, ['move_id', 'payment_ids'])
move_ids = [] # ones that we will need to remove
for i in invoices:
if i['move_id']:
move_ids.append(i['move_id'][0])
if i['payment_ids']:
account_move_line_obj = self.pool.get('account.move.line')
pay_ids = account_move_line_obj.browse(cr, uid, i['payment_ids'])
for move_line in pay_ids:
if move_line.reconcile_partial_id and move_line.reconcile_partial_id.line_partial_ids:
raise osv.except_osv(_('Error!'), _('You cannot cancel an invoice which is partially paid. You need to unreconcile related payment entries first.'))
# First, set the invoices as cancelled and detach the move ids
self.write(cr, uid, ids, {'state':'cancel', 'move_id':False})
if move_ids:
# second, invalidate the move(s)
account_move_obj.button_cancel(cr, uid, move_ids, context=context)
# delete the move this invoice was pointing to
# Note that the corresponding move_lines and move_reconciles
# will be automatically deleted too
account_move_obj.unlink(cr, uid, move_ids, context=context)
self._log_event(cr, uid, ids, -1.0, 'Cancel Invoice')
return True
###################
def list_distinct_taxes(self, cr, uid, ids):
invoices = self.browse(cr, uid, ids)
taxes = {}
for inv in invoices:
for tax in inv.tax_line:
if not tax['name'] in taxes:
taxes[tax['name']] = {'name': tax['name']}
return taxes.values()
def _log_event(self, cr, uid, ids, factor=1.0, name='Open Invoice'):
#TODO: implement messages system
return True
def name_get(self, cr, uid, ids, context=None):
if not ids:
return []
types = {
'out_invoice': _('Invoice'),
'in_invoice': _('Supplier Invoice'),
'out_refund': _('Refund'),
'in_refund': _('Supplier Refund'),
}
return [(r['id'], '%s %s' % (r['number'] or types[r['type']], r['name'] or '')) for r in self.read(cr, uid, ids, ['type', 'number', 'name'], context, load='_classic_write')]
def name_search(self, cr, user, name, args=None, operator='ilike', context=None, limit=100):
if not args:
args = []
if context is None:
context = {}
ids = []
if name:
ids = self.search(cr, user, [('number','=',name)] + args, limit=limit, context=context)
if not ids:
ids = self.search(cr, user, [('name',operator,name)] + args, limit=limit, context=context)
return self.name_get(cr, user, ids, context)
def _refund_cleanup_lines(self, cr, uid, lines, context=None):
"""Convert records to dict of values suitable for one2many line creation
:param list(browse_record) lines: records to convert
:return: list of command tuples for one2many line creation [(0, 0, dict of values), ...]
"""
clean_lines = []
for line in lines:
clean_line = {}
for field in line._all_columns.keys():
if line._all_columns[field].column._type == 'many2one':
clean_line[field] = line[field].id
elif line._all_columns[field].column._type not in ['many2many','one2many']:
clean_line[field] = line[field]
elif field == 'invoice_line_tax_id':
tax_list = []
for tax in line[field]:
tax_list.append(tax.id)
clean_line[field] = [(6,0, tax_list)]
clean_lines.append(clean_line)
return map(lambda x: (0,0,x), clean_lines)
def _prepare_refund(self, cr, uid, invoice, date=None, period_id=None, description=None, journal_id=None, context=None):
"""Prepare the dict of values to create the new refund from the invoice.
This method may be overridden to implement custom
refund generation (making sure to call super() to establish
a clean extension chain).
:param invoice: browse_record of the invoice to refund
:param string date: refund creation date from the wizard
:param integer period_id: force account.period from the wizard
:param string description: description of the refund from the wizard
:param integer journal_id: account.journal from the wizard
:return: dict of values to create() the refund
"""
obj_journal = self.pool.get('account.journal')
type_dict = {
'out_invoice': 'out_refund', # Customer Invoice
'in_invoice': 'in_refund', # Supplier Invoice
'out_refund': 'out_invoice', # Customer Refund
'in_refund': 'in_invoice', # Supplier Refund
}
invoice_data = {}
for field in ['name', 'reference', 'comment', 'date_due', 'partner_id', 'company_id',
'account_id', 'currency_id', 'payment_term', 'user_id', 'fiscal_position']:
if invoice._all_columns[field].column._type == 'many2one':
invoice_data[field] = invoice[field].id
else:
invoice_data[field] = invoice[field] if invoice[field] else False
invoice_lines = self._refund_cleanup_lines(cr, uid, invoice.invoice_line, context=context)
tax_lines = filter(lambda l: l['manual'], invoice.tax_line)
tax_lines = self._refund_cleanup_lines(cr, uid, tax_lines, context=context)
if journal_id:
refund_journal_ids = [journal_id]
elif invoice['type'] == 'in_invoice':
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','purchase_refund')], context=context)
else:
refund_journal_ids = obj_journal.search(cr, uid, [('type','=','sale_refund')], context=context)
if not date:
date = time.strftime('%Y-%m-%d')
invoice_data.update({
'type': type_dict[invoice['type']],
'date_invoice': date,
'state': 'draft',
'number': False,
'invoice_line': invoice_lines,
'tax_line': tax_lines,
'journal_id': refund_journal_ids and refund_journal_ids[0] or False,
})
if period_id:
invoice_data['period_id'] = period_id
if description:
invoice_data['name'] = description
return invoice_data
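# Create one draft refund per invoice in ids, using the values prepared by
# _prepare_refund(), and return the list of new invoice ids.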
def refund(self, cr, uid, ids, date=None, period_id=None, description=None, journal_id=None, context=None):
new_ids = []
for invoice in self.browse(cr, uid, ids, context=context):
invoice = self._prepare_refund(cr, uid, invoice,
date=date,
period_id=period_id,
description=description,
journal_id=journal_id,
context=context)
# create the new invoice
new_ids.append(self.create(cr, uid, invoice, context=context))
return new_ids
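# Register a payment on a single invoice: create a two-line move (invoice
# account against payment account), then reconcile it with the invoice move
# lines, fully when the residual is zero or a write-off account is given,
# partially otherwise.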
def pay_and_reconcile(self, cr, uid, ids, pay_amount, pay_account_id, period_id, pay_journal_id, writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context=None, name=''):
if context is None:
context = {}
#TODO check if we can use a different period for the payment and the writeoff line
assert len(ids)==1, "Can only pay one invoice at a time."
invoice = self.browse(cr, uid, ids[0], context=context)
src_account_id = invoice.account_id.id
# Take the seq as name for move
types = {'out_invoice': -1, 'in_invoice': 1, 'out_refund': 1, 'in_refund': -1}
direction = types[invoice.type]
#take the chosen date
if 'date_p' in context and context['date_p']:
date=context['date_p']
else:
date=time.strftime('%Y-%m-%d')
# Take the amount in currency and the currency of the payment
if 'amount_currency' in context and context['amount_currency'] and 'currency_id' in context and context['currency_id']:
amount_currency = context['amount_currency']
currency_id = context['currency_id']
else:
amount_currency = False
currency_id = False
pay_journal = self.pool.get('account.journal').read(cr, uid, pay_journal_id, ['type'], context=context)
if invoice.type in ('in_invoice', 'out_invoice'):
if pay_journal['type'] == 'bank':
entry_type = 'bank_pay_voucher' # Bank payment
else:
entry_type = 'pay_voucher' # Cash payment
else:
entry_type = 'cont_voucher'
if invoice.type in ('in_invoice', 'in_refund'):
ref = invoice.reference
else:
ref = self._convert_ref(cr, uid, invoice.number)
partner = self.pool['res.partner']._find_accounting_partner(invoice.partner_id)
# Pay attention to the sign for both debit/credit AND amount_currency
l1 = {
'debit': direction * pay_amount>0 and direction * pay_amount,
'credit': direction * pay_amount<0 and - direction * pay_amount,
'account_id': src_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
l2 = {
'debit': direction * pay_amount<0 and - direction * pay_amount,
'credit': direction * pay_amount>0 and direction * pay_amount,
'account_id': pay_account_id,
'partner_id': partner.id,
'ref':ref,
'date': date,
'currency_id':currency_id,
'amount_currency':amount_currency and - direction * amount_currency or 0.0,
'company_id': invoice.company_id.id,
}
if not name:
name = invoice.invoice_line and invoice.invoice_line[0].name or invoice.number
l1['name'] = name
l2['name'] = name
lines = [(0, 0, l1), (0, 0, l2)]
move = {'ref': ref, 'line_id': lines, 'journal_id': pay_journal_id, 'period_id': period_id, 'date': date}
move_id = self.pool.get('account.move').create(cr, uid, move, context=context)
line_ids = []
total = 0.0
line = self.pool.get('account.move.line')
move_ids = [move_id,]
if invoice.move_id:
move_ids.append(invoice.move_id.id)
cr.execute('SELECT id FROM account_move_line '\
'WHERE move_id IN %s',
((move_id, invoice.move_id.id),))
lines = line.browse(cr, uid, map(lambda x: x[0], cr.fetchall()) )
for l in lines+invoice.payment_ids:
if l.account_id.id == src_account_id:
line_ids.append(l.id)
total += (l.debit or 0.0) - (l.credit or 0.0)
inv_id, name = self.name_get(cr, uid, [invoice.id], context=context)[0]
if (not round(total,self.pool.get('decimal.precision').precision_get(cr, uid, 'Account'))) or writeoff_acc_id:
self.pool.get('account.move.line').reconcile(cr, uid, line_ids, 'manual', writeoff_acc_id, writeoff_period_id, writeoff_journal_id, context)
else:
code = invoice.currency_id.symbol
# TODO: use currency's formatting function
msg = _("Invoice partially paid: %s%s of %s%s (%s%s remaining).") % \
(pay_amount, code, invoice.amount_total, code, total, code)
self.message_post(cr, uid, [inv_id], body=msg, context=context)
self.pool.get('account.move.line').reconcile_partial(cr, uid, line_ids, 'manual', context)
# Update the stored value (fields.function), so we write to trigger recompute
self.pool.get('account.invoice').write(cr, uid, ids, {}, context=context)
return True
class account_invoice_line(osv.osv):
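# fields.function for price_subtotal: the tax-excluded total returned by
# compute_all() for the discounted unit price and quantity, rounded in the
# invoice currency.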
def _amount_line(self, cr, uid, ids, prop, unknow_none, unknow_dict):
res = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
for line in self.browse(cr, uid, ids):
price = line.price_unit * (1-(line.discount or 0.0)/100.0)
taxes = tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, price, line.quantity, product=line.product_id, partner=line.invoice_id.partner_id)
res[line.id] = taxes['total']
if line.invoice_id:
cur = line.invoice_id.currency_id
res[line.id] = cur_obj.round(cr, uid, cur, res[line.id])
return res
def _price_unit_default(self, cr, uid, context=None):
if context is None:
context = {}
if context.get('check_total', False):
t = context['check_total']
for l in context.get('invoice_line', {}):
if isinstance(l, (list, tuple)) and len(l) >= 3 and l[2]:
tax_obj = self.pool.get('account.tax')
p = l[2].get('price_unit', 0) * (1-l[2].get('discount', 0)/100.0)
t = t - (p * l[2].get('quantity'))
taxes = l[2].get('invoice_line_tax_id')
if len(taxes[0]) >= 3 and taxes[0][2]:
taxes = tax_obj.browse(cr, uid, list(taxes[0][2]))
for tax in tax_obj.compute_all(cr, uid, taxes, p,l[2].get('quantity'), l[2].get('product_id', False), context.get('partner_id', False))['taxes']:
t = t - tax['amount']
return t
return 0
_name = "account.invoice.line"
_description = "Invoice Line"
_order = "invoice_id,sequence,id"
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document', size=256, help="Reference of the document that produced this invoice."),
'sequence': fields.integer('Sequence', help="Gives the sequence of this line when displaying the invoice."),
'invoice_id': fields.many2one('account.invoice', 'Invoice Reference', ondelete='cascade', select=True),
'uos_id': fields.many2one('product.uom', 'Unit of Measure', ondelete='set null', select=True),
'product_id': fields.many2one('product.product', 'Product', ondelete='set null', select=True),
'account_id': fields.many2one('account.account', 'Account', required=True, domain=[('type','<>','view'), ('type', '<>', 'closed')], help="The income or expense account related to the selected product."),
'price_unit': fields.float('Unit Price', required=True, digits_compute= dp.get_precision('Product Price')),
'price_subtotal': fields.function(_amount_line, string='Amount', type="float",
digits_compute= dp.get_precision('Account'), store=True),
'quantity': fields.float('Quantity', digits_compute= dp.get_precision('Product Unit of Measure'), required=True),
'discount': fields.float('Discount (%)', digits_compute= dp.get_precision('Discount')),
'invoice_line_tax_id': fields.many2many('account.tax', 'account_invoice_line_tax', 'invoice_line_id', 'tax_id', 'Taxes', domain=[('parent_id','=',False)]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic Account'),
'company_id': fields.related('invoice_id','company_id',type='many2one',relation='res.company',string='Company', store=True, readonly=True),
'partner_id': fields.related('invoice_id','partner_id',type='many2one',relation='res.partner',string='Partner',store=True)
}
def _default_account_id(self, cr, uid, context=None):
# XXX this gets the default account for the user's company,
# it should get the default account for the invoice's company
# however, the invoice's company does not reach this point
if context is None:
context = {}
if context.get('type') in ('out_invoice','out_refund'):
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_income_categ', 'product.category', context=context)
else:
prop = self.pool.get('ir.property').get(cr, uid, 'property_account_expense_categ', 'product.category', context=context)
return prop and prop.id or False
_defaults = {
'quantity': 1,
'discount': 0.0,
'price_unit': _price_unit_default,
'account_id': _default_account_id,
'sequence': 10,
}
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
if context is None:
context = {}
res = super(account_invoice_line,self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
if context.get('type', False):
doc = etree.XML(res['arch'])
for node in doc.xpath("//field[@name='product_id']"):
if context['type'] in ('in_invoice', 'in_refund'):
node.set('domain', "[('purchase_ok', '=', True)]")
else:
node.set('domain', "[('sale_ok', '=', True)]")
res['arch'] = etree.tostring(doc)
return res
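# Onchange for the product on an invoice line: derive the income/expense account
# (mapped through the fiscal position), the default taxes, the unit price, the
# description and the unit of measure, adjusting the price when the invoice
# currency or the selected UoM differs from the product defaults.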
def product_id_change(self, cr, uid, ids, product, uom_id, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id is not None else context.get('company_id', False)
context = dict(context)
context.update({'company_id': company_id, 'force_company': company_id})
if not partner_id:
raise osv.except_osv(_('No Partner Defined!'),_("You must first select a partner!") )
if not product:
if type in ('in_invoice', 'in_refund'):
return {'value': {}, 'domain':{'product_uom':[]}}
else:
return {'value': {'price_unit': 0.0}, 'domain':{'product_uom':[]}}
part = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
fpos_obj = self.pool.get('account.fiscal.position')
fpos = fposition_id and fpos_obj.browse(cr, uid, fposition_id, context=context) or False
if part.lang:
context.update({'lang': part.lang})
result = {}
res = self.pool.get('product.product').browse(cr, uid, product, context=context)
if type in ('out_invoice','out_refund'):
a = res.property_account_income.id
if not a:
a = res.categ_id.property_account_income_categ.id
else:
a = res.property_account_expense.id
if not a:
a = res.categ_id.property_account_expense_categ.id
a = fpos_obj.map_account(cr, uid, fpos, a)
if a:
result['account_id'] = a
if type in ('out_invoice', 'out_refund'):
taxes = res.taxes_id and res.taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
else:
taxes = res.supplier_taxes_id and res.supplier_taxes_id or (a and self.pool.get('account.account').browse(cr, uid, a, context=context).tax_ids or False)
tax_id = fpos_obj.map_tax(cr, uid, fpos, taxes)
if type in ('in_invoice', 'in_refund'):
result.update( {'price_unit': price_unit or res.standard_price,'invoice_line_tax_id': tax_id} )
else:
result.update({'price_unit': res.list_price, 'invoice_line_tax_id': tax_id})
result['name'] = res.partner_ref
result['uos_id'] = uom_id or res.uom_id.id
if res.description:
result['name'] += '\n'+res.description
domain = {'uos_id':[('category_id','=',res.uom_id.category_id.id)]}
res_final = {'value':result, 'domain':domain}
if not company_id or not currency_id:
return res_final
company = self.pool.get('res.company').browse(cr, uid, company_id, context=context)
currency = self.pool.get('res.currency').browse(cr, uid, currency_id, context=context)
if company.currency_id.id != currency.id:
if type in ('in_invoice', 'in_refund'):
res_final['value']['price_unit'] = res.standard_price
new_price = res_final['value']['price_unit'] * currency.rate
res_final['value']['price_unit'] = new_price
if result['uos_id'] and result['uos_id'] != res.uom_id.id:
selected_uom = self.pool.get('product.uom').browse(cr, uid, result['uos_id'], context=context)
new_price = self.pool.get('product.uom')._compute_price(cr, uid, res.uom_id.id, res_final['value']['price_unit'], result['uos_id'])
res_final['value']['price_unit'] = new_price
return res_final
def uos_id_change(self, cr, uid, ids, product, uom, qty=0, name='', type='out_invoice', partner_id=False, fposition_id=False, price_unit=False, currency_id=False, context=None, company_id=None):
if context is None:
context = {}
company_id = company_id if company_id is not None else context.get('company_id', False)
context = dict(context)
context.update({'company_id': company_id})
warning = {}
res = self.product_id_change(cr, uid, ids, product, uom, qty, name, type, partner_id, fposition_id, price_unit, currency_id, context=context)
if not uom:
res['value']['price_unit'] = 0.0
if product and uom:
prod = self.pool.get('product.product').browse(cr, uid, product, context=context)
prod_uom = self.pool.get('product.uom').browse(cr, uid, uom, context=context)
if prod.uom_id.category_id.id != prod_uom.category_id.id:
warning = {
'title': _('Warning!'),
'message': _('The selected unit of measure is not compatible with the unit of measure of the product.')
}
res['value'].update({'uos_id': prod.uom_id.id})
return {'value': res['value'], 'warning': warning}
return res
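# Build one move-line dict per invoice line and attach the tax base code and the
# base amount (converted to company currency); extra taxes using a different
# base code produce additional zero-priced lines.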
def move_line_get(self, cr, uid, invoice_id, context=None):
res = []
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
if context is None:
context = {}
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
mres = self.move_line_get_item(cr, uid, line, context)
if not mres:
continue
res.append(mres)
tax_code_found= False
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id,
(line.price_unit * (1.0 - (line['discount'] or 0.0) / 100.0)),
line.quantity, line.product_id,
inv.partner_id)['taxes']:
if inv.type in ('out_invoice', 'in_invoice'):
tax_code_id = tax['base_code_id']
tax_amount = line.price_subtotal * tax['base_sign']
else:
tax_code_id = tax['ref_base_code_id']
tax_amount = line.price_subtotal * tax['ref_base_sign']
if tax_code_found:
if not tax_code_id:
continue
res.append(self.move_line_get_item(cr, uid, line, context))
res[-1]['price'] = 0.0
res[-1]['account_analytic_id'] = False
elif not tax_code_id:
continue
tax_code_found = True
res[-1]['tax_code_id'] = tax_code_id
res[-1]['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, tax_amount, context={'date': inv.date_invoice})
return res
def move_line_get_item(self, cr, uid, line, context=None):
return {
'type':'src',
'name': line.name.split('\n')[0][:64],
'price_unit':line.price_unit,
'quantity':line.quantity,
'price':line.price_subtotal,
'account_id':line.account_id.id,
'product_id':line.product_id.id,
'uos_id':line.uos_id.id,
'account_analytic_id':line.account_analytic_id.id,
'taxes':line.invoice_line_tax_id,
}
#
# Set the tax field according to the account and the fiscal position
#
def onchange_account_id(self, cr, uid, ids, product_id, partner_id, inv_type, fposition_id, account_id):
if not account_id:
return {}
unique_tax_ids = []
fpos = fposition_id and self.pool.get('account.fiscal.position').browse(cr, uid, fposition_id) or False
account = self.pool.get('account.account').browse(cr, uid, account_id)
if not product_id:
taxes = account.tax_ids
unique_tax_ids = self.pool.get('account.fiscal.position').map_tax(cr, uid, fpos, taxes)
else:
product_change_result = self.product_id_change(cr, uid, ids, product_id, False, type=inv_type,
partner_id=partner_id, fposition_id=fposition_id,
company_id=account.company_id.id)
if product_change_result and 'value' in product_change_result and 'invoice_line_tax_id' in product_change_result['value']:
unique_tax_ids = product_change_result['value']['invoice_line_tax_id']
return {'value':{'invoice_line_tax_id': unique_tax_ids}}
account_invoice_line()
class account_invoice_tax(osv.osv):
_name = "account.invoice.tax"
_description = "Invoice Tax"
def _count_factor(self, cr, uid, ids, name, args, context=None):
res = {}
for invoice_tax in self.browse(cr, uid, ids, context=context):
res[invoice_tax.id] = {
'factor_base': 1.0,
'factor_tax': 1.0,
}
if invoice_tax.amount != 0.0:
factor_tax = invoice_tax.tax_amount / invoice_tax.amount
res[invoice_tax.id]['factor_tax'] = factor_tax
if invoice_tax.base != 0.0:
factor_base = invoice_tax.base_amount / invoice_tax.base
res[invoice_tax.id]['factor_base'] = factor_base
return res
_columns = {
'invoice_id': fields.many2one('account.invoice', 'Invoice Line', ondelete='cascade', select=True),
'name': fields.char('Tax Description', size=64, required=True),
'account_id': fields.many2one('account.account', 'Tax Account', required=True, domain=[('type','<>','view'),('type','<>','income'), ('type', '<>', 'closed')]),
'account_analytic_id': fields.many2one('account.analytic.account', 'Analytic account', readonly=True),
'base': fields.float('Base', digits_compute=dp.get_precision('Account')),
'amount': fields.float('Amount', digits_compute=dp.get_precision('Account')),
'manual': fields.boolean('Manual'),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of invoice tax."),
'base_code_id': fields.many2one('account.tax.code', 'Base Code', help="The account basis of the tax declaration."),
'base_amount': fields.float('Base Code Amount', digits_compute=dp.get_precision('Account')),
'tax_code_id': fields.many2one('account.tax.code', 'Tax Code', help="The tax basis of the tax declaration."),
'tax_amount': fields.float('Tax Code Amount', digits_compute=dp.get_precision('Account')),
'company_id': fields.related('account_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True),
'factor_base': fields.function(_count_factor, string='Multiplication factor for Base code', type='float', multi="all"),
'factor_tax': fields.function(_count_factor, string='Multiplication factor for Tax code', type='float', multi="all")
}
def base_change(self, cr, uid, ids, base, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_base'])['factor_base']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
base = cur_obj.compute(cr, uid, currency_id, company_currency, base*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'base_amount':base}}
def amount_change(self, cr, uid, ids, amount, currency_id=False, company_id=False, date_invoice=False):
cur_obj = self.pool.get('res.currency')
company_obj = self.pool.get('res.company')
company_currency = False
factor = 1
if ids:
factor = self.read(cr, uid, ids[0], ['factor_tax'])['factor_tax']
if company_id:
company_currency = company_obj.read(cr, uid, [company_id], ['currency_id'])[0]['currency_id'][0]
if currency_id and company_currency:
amount = cur_obj.compute(cr, uid, currency_id, company_currency, amount*factor, context={'date': date_invoice or time.strftime('%Y-%m-%d')}, round=False)
return {'value': {'tax_amount': amount}}
_order = 'sequence'
_defaults = {
'manual': 1,
'base_amount': 0.0,
'tax_amount': 0.0,
}
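# Compute the invoice tax lines: run compute_all() on every invoice line, group
# the results by (tax code, base code, account, analytic account), convert the
# base/tax amounts to the company currency and round the grouped totals in the
# invoice currency.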
def compute(self, cr, uid, invoice_id, context=None):
tax_grouped = {}
tax_obj = self.pool.get('account.tax')
cur_obj = self.pool.get('res.currency')
inv = self.pool.get('account.invoice').browse(cr, uid, invoice_id, context=context)
cur = inv.currency_id
company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id
for line in inv.invoice_line:
for tax in tax_obj.compute_all(cr, uid, line.invoice_line_tax_id, (line.price_unit* (1-(line.discount or 0.0)/100.0)), line.quantity, line.product_id, inv.partner_id)['taxes']:
val={}
val['invoice_id'] = inv.id
val['name'] = tax['name']
val['amount'] = tax['amount']
val['manual'] = False
val['sequence'] = tax['sequence']
val['base'] = cur_obj.round(cr, uid, cur, tax['price_unit'] * line['quantity'])
if inv.type in ('out_invoice','in_invoice'):
val['base_code_id'] = tax['base_code_id']
val['tax_code_id'] = tax['tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_collected_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_collected_id']
else:
val['base_code_id'] = tax['ref_base_code_id']
val['tax_code_id'] = tax['ref_tax_code_id']
val['base_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['base'] * tax['ref_base_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['tax_amount'] = cur_obj.compute(cr, uid, inv.currency_id.id, company_currency, val['amount'] * tax['ref_tax_sign'], context={'date': inv.date_invoice or time.strftime('%Y-%m-%d')}, round=False)
val['account_id'] = tax['account_paid_id'] or line.account_id.id
val['account_analytic_id'] = tax['account_analytic_paid_id']
# If the taxes generate moves on the same financial account as the invoice line
# and no default analytic account is defined at the tax level, propagate the
# analytic account from the invoice line to the tax line. This is necessary
# in situations where (part of) the taxes cannot be reclaimed,
# to ensure the tax move is allocated to the proper analytic account.
if not val.get('account_analytic_id') and line.account_analytic_id and val['account_id'] == line.account_id.id:
val['account_analytic_id'] = line.account_analytic_id.id
key = (val['tax_code_id'], val['base_code_id'], val['account_id'], val['account_analytic_id'])
if not key in tax_grouped:
tax_grouped[key] = val
else:
tax_grouped[key]['amount'] += val['amount']
tax_grouped[key]['base'] += val['base']
tax_grouped[key]['base_amount'] += val['base_amount']
tax_grouped[key]['tax_amount'] += val['tax_amount']
for t in tax_grouped.values():
t['base'] = cur_obj.round(cr, uid, cur, t['base'])
t['amount'] = cur_obj.round(cr, uid, cur, t['amount'])
t['base_amount'] = cur_obj.round(cr, uid, cur, t['base_amount'])
t['tax_amount'] = cur_obj.round(cr, uid, cur, t['tax_amount'])
return tax_grouped
def move_line_get(self, cr, uid, invoice_id):
res = []
cr.execute('SELECT * FROM account_invoice_tax WHERE invoice_id=%s', (invoice_id,))
for t in cr.dictfetchall():
if not t['amount'] \
and not t['tax_code_id'] \
and not t['tax_amount']:
continue
res.append({
'type':'tax',
'name':t['name'],
'price_unit': t['amount'],
'quantity': 1,
'price': t['amount'] or 0.0,
'account_id': t['account_id'],
'tax_code_id': t['tax_code_id'],
'tax_amount': t['tax_amount'],
'account_analytic_id': t['account_analytic_id'],
})
return res
class res_partner(osv.osv):
""" Inherits partner and adds invoice information in the partner form """
_inherit = 'res.partner'
_columns = {
'invoice_ids': fields.one2many('account.invoice.line', 'partner_id', 'Invoices', readonly=True),
}
def _find_accounting_partner(self, partner):
'''
Find the partner for which the accounting entries will be created
'''
# FIXME: after 7.0, to be replaced by the function field partner.commercial_partner_id
# If the chosen partner is not a company but has a parent company, use the parent
# for the journal entries, because you want to invoice 'Agrolait, accounting
# department' while the journal items are for 'Agrolait'.
while not partner.is_company and partner.parent_id:
partner = partner.parent_id
return partner
def copy(self, cr, uid, id, default=None, context=None):
default = default or {}
default.update({'invoice_ids' : []})
return super(res_partner, self).copy(cr, uid, id, default, context)
class mail_compose_message(osv.Model):
_inherit = 'mail.compose.message'
def send_mail(self, cr, uid, ids, context=None):
context = context or {}
if context.get('default_model') == 'account.invoice' and context.get('default_res_id') and context.get('mark_invoice_as_sent'):
context = dict(context, mail_post_autofollow=True)
self.pool.get('account.invoice').write(cr, uid, [context['default_res_id']], {'sent': True}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
izakp/hokusai | hokusai/commands/deployment.py | 1 | 1832 | from hokusai.lib.command import command
from hokusai.lib.common import print_green
from hokusai.services.deployment import Deployment
from hokusai.services.command_runner import CommandRunner
from hokusai.services.ecr import ECR
from hokusai.lib.exceptions import HokusaiError
@command()
def update(context, tag, migration, constraint, git_remote, timeout,
namespace=None, update_config=False, filename=None):
if migration is not None:
print_green("Running migration '%s' on %s..." % (migration, context), newline_after=True)
return_code = CommandRunner(context, namespace=namespace).run(tag, migration, constraint=constraint, tty=False)
if return_code:
raise HokusaiError("Migration failed with return code %s" % return_code, return_code=return_code)
Deployment(context, namespace=namespace).update(tag, constraint, git_remote, timeout,
update_config=update_config, filename=filename)
print_green("Deployment(s) updated to %s" % tag)
@command()
def refresh(context, deployment_name, namespace=None):
deployment = Deployment(context, deployment_name=deployment_name, namespace=namespace)
deployment.refresh()
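# Promote the image currently deployed on staging to production, optionally
# running a migration against production first.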
@command()
def promote(migration, constraint, git_remote, timeout, update_config=False, filename=None):
if migration is not None:
print_green("Running migration '%s' on production..." % migration, newline_after=True)
return_code = CommandRunner('production').run('staging', migration, constraint=constraint, tty=False)
if return_code:
raise HokusaiError("Migration failed with return code %s" % return_code, return_code=return_code)
Deployment('production').update('staging', constraint, git_remote, timeout, update_config=update_config, filename=filename)
print_green("Promoted staging to production")
| mit |
pokelondon/pokeradio | web/pokeradio/api/migrations/0001_initial.py | 1 | 4669 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Token'
db.create_table(u'api_token', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('created', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, blank=True)),
('token', self.gf('django.db.models.fields.CharField')(max_length=255)),
('enabled', self.gf('django.db.models.fields.BooleanField')(default=True)),
('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
))
db.send_create_signal(u'api', ['Token'])
def backwards(self, orm):
# Deleting model 'Token'
db.delete_table(u'api_token')
models = {
u'api.token': {
'Meta': {'object_name': 'Token'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['api'] | gpl-3.0 |
tuxfux-hlp-notes/python-batches | archieves/batch-64/09-modules/myenv/lib/python2.7/site-packages/django/utils/daemonize.py | 169 | 2046 | import os
import sys
from . import six
buffering = int(six.PY3) # No unbuffered text I/O on Python 3 (#20815).
if os.name == 'posix':
def become_daemon(our_home_dir='.', out_log='/dev/null',
err_log='/dev/null', umask=0o022):
"Robustly turn into a UNIX daemon, running in our_home_dir."
# First fork
try:
if os.fork() > 0:
sys.exit(0) # kill off parent
except OSError as e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
os.setsid()
os.chdir(our_home_dir)
os.umask(umask)
# Second fork
try:
if os.fork() > 0:
os._exit(0)
except OSError as e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
os._exit(1)
si = open('/dev/null', 'r')
so = open(out_log, 'a+', buffering)
se = open(err_log, 'a+', buffering)
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
# Set custom file descriptors so that they get proper buffering.
sys.stdout, sys.stderr = so, se
else:
def become_daemon(our_home_dir='.', out_log=None, err_log=None, umask=0o022):
"""
If we're not running under a POSIX system, just simulate the daemon
mode by doing redirections and directory changing.
"""
os.chdir(our_home_dir)
os.umask(umask)
sys.stdin.close()
sys.stdout.close()
sys.stderr.close()
if err_log:
sys.stderr = open(err_log, 'a', buffering)
else:
sys.stderr = NullDevice()
if out_log:
sys.stdout = open(out_log, 'a', buffering)
else:
sys.stdout = NullDevice()
class NullDevice:
"A writeable object that writes to nowhere -- like /dev/null."
def write(self, s):
pass
| gpl-3.0 |
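A hedged usage sketch for become_daemon above. The import path matches this file's location in older Django releases (the module was later removed), and the log paths are illustrative assumptions.

import os

from django.utils.daemonize import become_daemon


def run_worker():
    # After this call (two forks + setsid on POSIX) the process is detached;
    # stdout/stderr are redirected to the log files given here.
    become_daemon(our_home_dir='/tmp',
                  out_log='/tmp/worker.out',
                  err_log='/tmp/worker.err')
    with open('/tmp/worker.pid', 'w') as f:
        f.write(str(os.getpid()))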
idl3r/Ropper | ropperapp/disasm/gadget.py | 1 | 4099 | #!/usr/bin/env python2
# coding=utf-8
#
# Copyright 2014 Sascha Schirra
#
# This file is part of Ropper.
#
# Ropper is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ropper is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import re
import ropperapp.common.enum as enum
from ropperapp.common.utils import toHex
from ropperapp.common.coloredstring import *
class Category(enum.Enum):
_enum_ = 'STACK_PIVOTING LOAD_REG LOAD_MEM STACK_SHIFT SYSCALL JMP CALL WRITE_MEM INC_REG CLEAR_REG SUB_REG ADD_REG XCHG_REG NONE'
class GadgetType(enum.Enum):
_enum_ = 'ROP JOP ALL'
class Gadget(object):
def __init__(self, arch):
super(Gadget, self).__init__()
self.__arch = arch
self.__lines = []
self._gadget = ''
self._vaddr = 0x0
self.__category = None
self.__imageBase = 0x0
@property
def lines(self):
return self.__lines
@property
def imageBase(self):
return self.__imageBase
@imageBase.setter
def imageBase(self, base):
self.__imageBase = base
@property
def vaddr(self):
return self._vaddr
def append(self, address, inst):
self.__lines.append((address, inst))
self._gadget += inst + '\n'
def match(self, filter):
if not filter or len(filter) == 0:
return True
return bool(re.match(filter, self._gadget))
def addressesContainsBytes(self, badbytes):
line = self.__lines[0]
for b in badbytes:
address = line[0] + self.__imageBase
if type(b) == str:
b = ord(b)
for i in range(4):
if (address & 0xff) == b:
return True
address >>= 8
def simpleInstructionString(self):
toReturn = ''
for line in self.__lines:
toReturn += cstr(line[1], Color.LIGHT_GRAY) + cstr('; ', Color.LIGHT_BLUE)
return toReturn
def simpleString(self):
toReturn = '%s: ' % cstr(toHex(self.__lines[0][0] + self.__imageBase, self.__arch.addressLength), Color.RED)
toReturn += self.simpleInstructionString()
return toReturn
@property
def category(self):
if not self.__category:
line = self.__lines[0][1]
for cat, regexs in self.__arch._categories.items():
for regex in regexs[0]:
match = re.match(regex, line)
if match:
for invalid in regexs[1]:
for l in self.__lines[1:]:
if l[1].startswith(invalid):
self.__category = (Category.NONE,)
return self.__category
self.__category = (cat, len(self.__lines) -1 ,match.groupdict())
return self.__category
self.__category = (Category.NONE,)
return self.__category
def __len__(self):
return len(self.__lines)
def __cmp__(self, other):
if isinstance(other, self.__class__) and len(self) == len(other):
return cmp(str(self),str(other))
return -1
def __str__(self):
toReturn = cstr('Gadget', Color.GREEN)+': %s\n' % (cstr(toHex(self.__lines[0][0] + self.__imageBase, self.__arch.addressLength), Color.RED))
for line in self.__lines:
toReturn += cstr(toHex(line[0] + self.__imageBase, self.__arch.addressLength), Color.BLUE) +': '+ cstr(line[1], Color.WHITE) + '\n'
return toReturn
| gpl-2.0 |
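A hedged sketch of populating and printing the Gadget class above, assuming the module's own imports (ropperapp.common.*) resolve. FakeArch is a hypothetical stand-in exposing only the attribute these calls read; the category property would additionally need a populated _categories dict.

class FakeArch(object):
    addressLength = 8  # bytes used when formatting addresses


g = Gadget(FakeArch())
g.imageBase = 0x400000
g.append(0x1000, 'pop rdi')
g.append(0x1001, 'ret')

# Prints the gadget's first address (offset + image base) followed by
# "pop rdi; ret; " in the module's colored-string markup.
print(g.simpleString())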
JVillella/tensorflow | tensorflow/contrib/learn/python/learn/preprocessing/categorical_vocabulary.py | 63 | 4206 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Categorical vocabulary classes to map categories to indexes.
Can be used for categorical variables, sparse variables and words.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import six
class CategoricalVocabulary(object):
"""Categorical variables vocabulary class.
Accumulates and provides mapping from classes to indexes.
Can be easily used for words.
"""
def __init__(self, unknown_token="<UNK>", support_reverse=True):
self._unknown_token = unknown_token
self._mapping = {unknown_token: 0}
self._support_reverse = support_reverse
if support_reverse:
self._reverse_mapping = [unknown_token]
self._freq = collections.defaultdict(int)
self._freeze = False
def __len__(self):
"""Returns total count of mappings. Including unknown token."""
return len(self._mapping)
def freeze(self, freeze=True):
"""Freezes the vocabulary, after which new words return unknown token id.
Args:
freeze: True to freeze, False to unfreeze.
"""
self._freeze = freeze
def get(self, category):
"""Returns word's id in the vocabulary.
If category is new, creates a new id for it.
Args:
category: string or integer to lookup in vocabulary.
Returns:
integer, id in the vocabulary.
"""
if category not in self._mapping:
if self._freeze:
return 0
self._mapping[category] = len(self._mapping)
if self._support_reverse:
self._reverse_mapping.append(category)
return self._mapping[category]
def add(self, category, count=1):
"""Adds count of the category to the frequency table.
Args:
category: string or integer, category to add frequency to.
count: optional integer, how many to add.
"""
category_id = self.get(category)
if category_id <= 0:
return
self._freq[category] += count
def trim(self, min_frequency, max_frequency=-1):
"""Trims vocabulary for minimum frequency.
Remaps ids from 1..n in sorted frequency order,
where n is the number of elements left.
Args:
min_frequency: minimum frequency to keep.
max_frequency: optional, maximum frequency to keep.
Useful to remove very frequent categories (like stop words).
"""
# Sort by alphabet then reversed frequency.
self._freq = sorted(
sorted(
six.iteritems(self._freq),
key=lambda x: (isinstance(x[0], str), x[0])),
key=lambda x: x[1],
reverse=True)
self._mapping = {self._unknown_token: 0}
if self._support_reverse:
self._reverse_mapping = [self._unknown_token]
idx = 1
for category, count in self._freq:
if max_frequency > 0 and count >= max_frequency:
continue
if count <= min_frequency:
break
self._mapping[category] = idx
idx += 1
if self._support_reverse:
self._reverse_mapping.append(category)
self._freq = dict(self._freq[:idx - 1])
def reverse(self, class_id):
"""Given class id reverse to original class name.
Args:
class_id: Id of the class.
Returns:
Class name.
Raises:
ValueError: if this vocabulary wasn't initialized with support_reverse.
"""
if not self._support_reverse:
raise ValueError("This vocabulary wasn't initialized with "
"support_reverse to support reverse() function.")
return self._reverse_mapping[class_id]
| apache-2.0 |
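A short usage sketch for the CategoricalVocabulary class above, assuming the class is in scope: build the vocabulary from tokens, trim rare entries, then freeze it so unseen tokens map to the unknown id.

vocab = CategoricalVocabulary()
for token in ["cat", "dog", "cat", "fish", "cat", "dog"]:
    vocab.add(token)            # get() assigns an id, add() accumulates frequency

vocab.trim(min_frequency=1)     # drops tokens seen at most once ("fish")
vocab.freeze()                  # unseen tokens now map to id 0 (<UNK>)

print(len(vocab))                               # 3: <UNK>, cat, dog
print(vocab.get("cat"), vocab.get("unseen"))    # 1 0
print(vocab.reverse(1))                         # cat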
mgoulish/qpid-dispatch | tests/system_tests_core_endpoint.py | 2 | 9837 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from proton import Message, Timeout
from system_test import TestCase, Qdrouterd, main_module, TIMEOUT
from system_test import unittest
from proton.handlers import MessagingHandler
from proton.reactor import Container, DynamicNodeProperties
from qpid_dispatch_internal.compat import UNICODE
from qpid_dispatch.management.client import Node
class RouterTest(TestCase):
inter_router_port = None
@classmethod
def setUpClass(cls):
"""Start a router"""
super(RouterTest, cls).setUpClass()
def router(name, connection, args=[]):
config = [
('router', {'mode': 'interior', 'id': name}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'multiTenant': 'yes'}),
('listener', {'port': cls.tester.get_port(), 'stripAnnotations': 'no', 'role': 'route-container'}),
('address', {'prefix': 'closest', 'distribution': 'closest'}),
('address', {'prefix': 'spread', 'distribution': 'balanced'}),
('address', {'prefix': 'multicast', 'distribution': 'multicast'}),
('address', {'prefix': '0.0.0.0/queue', 'waypoint': 'yes'}),
connection
]
config = Qdrouterd.Config(config)
cls.routers.append(cls.tester.qdrouterd(name, config, wait=True, cl_args=args))
cls.routers = []
inter_router_port = cls.tester.get_port()
router('A', ('listener', {'role': 'inter-router', 'port': inter_router_port}), ["-T"])
def test_01_denied_link(self):
test = DenyLinkTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/deny")
test.run()
self.assertEqual(None, test.error)
def test_02_discard_deliveries(self):
test = DiscardTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/discard")
test.run()
self.assertEqual(None, test.error)
def test_03_presettled_source(self):
test = SourceTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/source_ps", 300, 300)
test.run()
self.assertEqual(None, test.error)
def test_04_unsettled_source(self):
test = SourceTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/source", 300, 0)
test.run()
self.assertEqual(None, test.error)
def test_05_echo_attach_detach(self):
test = EchoTest(self.routers[0].addresses[0], "org.apache.qpid.dispatch.router/test/echo")
test.run()
self.assertEqual(None, test.error)
class Timeout(object):
def __init__(self, parent):
self.parent = parent
def on_timer_task(self, event):
self.parent.timeout()
class DenyLinkTest(MessagingHandler):
def __init__(self, host, address):
super(DenyLinkTest, self).__init__(prefetch = 0)
self.host = host
self.address = address
self.conn = None
self.error = None
self.receiver = None
self.sender = None
self.receiver_failed = False
self.sender_failed = False
def timeout(self):
self.error = "Timeout Expired: receiver_failed=%s sender_failed=%s" %\
("yes" if self.receiver_failed else "no",
"yes" if self.sender_failed else "no")
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(5.0, Timeout(self))
self.conn = event.container.connect(self.host)
self.receiver = event.container.create_receiver(self.conn, self.address)
self.sender = event.container.create_sender(self.conn, self.address)
def on_link_error(self, event):
if event.receiver == self.receiver:
self.receiver_failed = True
if event.sender == self.sender:
self.sender_failed = True
if self.receiver_failed and self.sender_failed:
self.conn.close()
self.timer.cancel()
def run(self):
Container(self).run()
class DiscardTest(MessagingHandler):
def __init__(self, host, address):
super(DiscardTest, self).__init__(prefetch = 0)
self.host = host
self.address = address
self.conn = None
self.error = None
self.sender = None
self.count = 300
self.sent = 0
self.rejected = 0
def timeout(self):
self.error = "Timeout Expired: n_sent=%d n_rejected=%d" % (self.sent, self.rejected)
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(5.0, Timeout(self))
self.conn = event.container.connect(self.host)
self.sender = event.container.create_sender(self.conn, self.address)
def on_sendable(self, event):
while self.sender.credit > 0 and self.sent < self.count:
msg = Message(body="Discard Test")
self.sender.send(msg)
self.sent += 1
def on_rejected(self, event):
self.rejected += 1
self.conn.close()
self.timer.cancel()
def on_link_error(self, event):
if event.receiver == self.receiver:
self.receiver_failed = True
if event.sender == self.sender:
self.sender_failed = True
if self.receiver_failed and self.sender_failed:
self.conn.close()
self.timer.cancel()
def run(self):
Container(self).run()
class SourceTest(MessagingHandler):
def __init__(self, host, address, count, expected_ps):
super(SourceTest, self).__init__(prefetch = 0)
self.host = host
self.address = address
self.expected_ps = expected_ps
self.conn = None
self.error = None
self.receiver = None
self.count = count
self.n_credit_given = 0
self.n_rcvd = 0
self.n_rcvd_ps = 0
def timeout(self):
self.error = "Timeout Expired: n_rcvd=%d" % (self.n_rcvd)
self.conn.close()
def on_start(self, event):
self.timer = event.reactor.schedule(TIMEOUT, Timeout(self))
self.conn = event.container.connect(self.host)
self.receiver = event.container.create_receiver(self.conn, self.address)
self.receiver.flow(3)
self.n_credit_given = 3
def on_message(self, event):
dlv = event.delivery
if dlv.settled:
self.n_rcvd_ps += 1
self.n_rcvd += 1
if self.n_rcvd == self.count:
self.conn.close()
self.timer.cancel()
if self.n_rcvd_ps != self.expected_ps:
self.error = "Received %d deliveries, %d were settled (expected %d)" %\
(self.n_rcvd, self.n_rcvd_ps, self.expected_ps)
elif self.n_rcvd == self.n_credit_given:
self.receiver.flow(5)
self.n_credit_given += 5
def run(self):
Container(self).run()
class EchoTest(MessagingHandler):
def __init__(self, host, address):
super(EchoTest, self).__init__(prefetch = 0)
self.host = host
self.address = address
self.conn = None
self.error = None
self.action = "Connecting to router"
self.receiver = None
self.sender = None
def timeout(self):
self.error = "Timeout Expired while attempting action: %s" % self.action
self.conn.close()
def fail(self, error):
self.error = error
self.conn.close()
self.timer.cancel()
def on_start(self, event):
self.timer = event.reactor.schedule(5.0, Timeout(self))
self.conn = event.container.connect(self.host)
self.receiver = event.container.create_receiver(self.conn, self.address)
def on_link_opening(self, event):
if event.sender:
self.action = "Attaching incoming echoed link"
self.sender = event.sender
if event.sender.remote_source.address == self.address:
event.sender.source.address = self.address
event.sender.open()
else:
self.fail("Incorrect address on incoming sender: got %s, expected %s" %
(event.sender.remote_source.address, self.address))
def on_link_opened(self, event):
if event.receiver == self.receiver:
self.action = "Closing the echoed link"
self.receiver.close()
def on_link_closed(self, event):
if event.receiver == self.receiver:
self.conn.close()
self.timer.cancel()
def run(self):
Container(self).run()
if __name__ == '__main__':
unittest.main(main_module())
| apache-2.0 |
RO-ny9/python-for-android | python3-alpha/python3-src/PC/VS8.0/build_tkinter.py | 91 | 2196 | """Script to compile the dependencies of _tkinter
Copyright (c) 2007 by Christian Heimes <[email protected]>
Licensed to PSF under a Contributor Agreement.
"""
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
par = os.path.pardir
if 1:
TCL = "tcl8.4.16"
TK = "tk8.4.16"
TIX = "tix-8.4.0"
else:
TCL = "tcl8.5b3"
TK = "tcl8.5b3"
TIX = "Tix8.4.2"
ROOT = os.path.abspath(os.path.join(here, par, par, par))
# Windows 2000 compatibility: WINVER 0x0500
# http://msdn2.microsoft.com/en-us/library/aa383745.aspx
NMAKE = "nmake /nologo /f %s COMPILERFLAGS=-DWINVER=0x0500 %s %s"
def nmake(makefile, command="", **kw):
defines = ' '.join(k+'='+v for k, v in kw.items())
cmd = NMAKE % (makefile, defines, command)
print("\n\n"+cmd+"\n")
if os.system(cmd) != 0:
raise RuntimeError(cmd)
def build(platform, clean):
if platform == "Win32":
dest = os.path.join(ROOT, "tcltk")
machine = "X86"
elif platform == "x64":
dest = os.path.join(ROOT, "tcltk64")
machine = "X64"
else:
raise ValueError(platform)
# TCL
tcldir = os.path.join(ROOT, TCL)
if 1:
os.chdir(os.path.join(tcldir, "win"))
if clean:
nmake("makefile.vc", "clean")
nmake("makefile.vc")
nmake("makefile.vc", "install", INSTALLDIR=dest)
# TK
if 1:
os.chdir(os.path.join(ROOT, TK, "win"))
if clean:
nmake("makefile.vc", "clean", TCLDIR=tcldir)
nmake("makefile.vc", TCLDIR=tcldir)
nmake("makefile.vc", "install", TCLDIR=tcldir, INSTALLDIR=dest)
# TIX
if 1:
# python9.mak is available at http://svn.python.org
os.chdir(os.path.join(ROOT, TIX, "win"))
if clean:
nmake("python9.mak", "clean")
nmake("python9.mak", MACHINE=machine)
nmake("python9.mak", "install")
def main():
if len(sys.argv) < 2 or sys.argv[1] not in ("Win32", "x64"):
print("%s Win32|x64" % sys.argv[0])
sys.exit(1)
if "-c" in sys.argv:
clean = True
else:
clean = False
build(sys.argv[1], clean)
if __name__ == '__main__':
main()
| apache-2.0 |
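The script above is normally driven from the command line; below is a hedged sketch of the equivalent programmatic call, assuming the module is importable and the Tcl/Tk/Tix source trees sit at the ROOT locations it computes.

# Command-line form:
#   python build_tkinter.py Win32       # build 32-bit Tcl/Tk/Tix
#   python build_tkinter.py x64 -c      # clean, then build 64-bit
import build_tkinter

build_tkinter.build("x64", True)   # platform, clean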
h4ck3rm1k3/pywikibot-core | pywikibot/version.py | 1 | 18622 | # -*- coding: utf-8 -*-
"""Module to determine the pywikibot version (tag, revision and date)."""
#
# (C) Merlijn 'valhallasw' van Deen, 2007-2014
# (C) xqt, 2010-2016
# (C) Pywikibot team, 2007-2016
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import datetime
import os
import subprocess
import sys
import time
import xml.dom.minidom
from distutils import log
from distutils.sysconfig import get_python_lib
from io import BytesIO
from warnings import warn
try:
from setuptools import svn_utils
except ImportError:
try:
from setuptools_svn import svn_utils
except ImportError as e:
svn_utils = e
import pywikibot
from pywikibot import config2 as config
from pywikibot.tools import deprecated, PY2
if not PY2:
basestring = (str, )
cache = None
_logger = 'version'
class ParseError(Exception):
"""Parsing went wrong."""
def _get_program_dir():
_program_dir = os.path.normpath(os.path.split(os.path.dirname(__file__))[0])
return _program_dir
def getversion(online=True):
"""Return a pywikibot version string.
@param online: (optional) Include information obtained online
"""
data = dict(getversiondict()) # copy dict to prevent changes in 'cache'
data['cmp_ver'] = 'n/a'
if online:
try:
hsh2 = getversion_onlinerepo()
hsh1 = data['hsh']
data['cmp_ver'] = 'OUTDATED' if hsh1 != hsh2 else 'ok'
except Exception:
pass
data['hsh'] = data['hsh'][:7] # make short hash from full hash
return '%(tag)s (%(hsh)s, %(rev)s, %(date)s, %(cmp_ver)s)' % data
def getversiondict():
"""Get version info for the package.
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{dict} of four C{str}
"""
global cache
if cache:
return cache
_program_dir = _get_program_dir()
exceptions = {}
for vcs_func in (getversion_git,
getversion_svn_setuptools,
getversion_svn,
getversion_nightly,
getversion_package):
try:
(tag, rev, date, hsh) = vcs_func(_program_dir)
except Exception as e:
exceptions[vcs_func] = e
else:
break
else:
# nothing worked; version unknown (but suppress exceptions)
# the value is most likely '$Id' + '$', it means that
# pywikibot was imported without using version control at all.
tag, rev, date, hsh = (
'', '-1 (unknown)', '0 (unknown)', '(unknown)')
# git and svn can silently fail, as it may be a nightly.
if getversion_package in exceptions:
warn('Unable to detect version; exceptions raised:\n%r'
% exceptions, UserWarning)
elif exceptions:
pywikibot.debug('version algorithm exceptions:\n%r'
% exceptions, _logger)
if isinstance(date, basestring):
datestring = date
elif isinstance(date, time.struct_time):
datestring = time.strftime('%Y/%m/%d, %H:%M:%S', date)
else:
warn('Unable to detect package date', UserWarning)
datestring = '-2 (unknown)'
cache = dict(tag=tag, rev=rev, date=datestring, hsh=hsh)
return cache
@deprecated('getversion_svn_setuptools')
def svn_rev_info(path):
"""Fetch information about the current revision of an Subversion checkout.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
@rtype: C{tuple} of two C{str} and a C{time.struct_time}
"""
if not os.path.isdir(os.path.join(path, '.svn')):
path = os.path.join(path, '..')
_program_dir = path
filename = os.path.join(_program_dir, '.svn/entries')
if os.path.isfile(filename):
with open(filename) as entries:
version = entries.readline().strip()
if version != '12':
for i in range(3):
entries.readline()
tag = entries.readline().strip()
t = tag.split('://')
t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',
'')
tag = '[%s] %s' % (t[0], t[1])
for i in range(4):
entries.readline()
date = time.strptime(entries.readline()[:19],
'%Y-%m-%dT%H:%M:%S')
rev = entries.readline()[:-1]
return tag, rev, date
# We haven't found the information in entries file.
# Use sqlite table for new entries format
from sqlite3 import dbapi2 as sqlite
con = sqlite.connect(os.path.join(_program_dir, ".svn/wc.db"))
cur = con.cursor()
cur.execute("""select
local_relpath, repos_path, revision, changed_date, checksum from nodes
order by revision desc, changed_date desc""")
name, tag, rev, date, checksum = cur.fetchone()
cur.execute("select root from repository")
tag, = cur.fetchone()
con.close()
tag = os.path.split(tag)[1]
date = time.gmtime(date / 1000000)
return tag, rev, date
def github_svn_rev2hash(tag, rev):
"""Convert a Subversion revision to a Git hash using Github.
@param tag: name of the Subversion repo on Github
@param rev: Subversion revision identifier
@return: the git hash
@rtype: str
"""
from pywikibot.comms import http
uri = 'https://github.com/wikimedia/%s/!svn/vcc/default' % tag
request = http.fetch(uri=uri, method='PROPFIND',
body="<?xml version='1.0' encoding='utf-8'?>"
"<propfind xmlns=\"DAV:\"><allprop/></propfind>",
headers={'label': str(rev),
'user-agent': 'SVN/1.7.5 {pwb}'})
dom = xml.dom.minidom.parse(BytesIO(request.raw))
hsh = dom.getElementsByTagName("C:git-commit")[0].firstChild.nodeValue
date = dom.getElementsByTagName("S:date")[0].firstChild.nodeValue
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
return hsh, date
def getversion_svn_setuptools(path=None):
"""Get version info for a Subversion checkout using setuptools.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
if isinstance(svn_utils, Exception):
raise svn_utils
tag = 'pywikibot-core'
_program_dir = path or _get_program_dir()
svninfo = svn_utils.SvnInfo(_program_dir)
# suppress warning
old_level = log.set_threshold(log.ERROR)
rev = svninfo.get_revision()
log.set_threshold(old_level)
if not isinstance(rev, int):
raise TypeError('SvnInfo.get_revision() returned type %s' % type(rev))
if rev < 0:
raise ValueError('SvnInfo.get_revision() returned %d' % rev)
if rev == 0:
raise ParseError('SvnInfo: invalid workarea')
hsh, date = github_svn_rev2hash(tag, rev)
rev = 's%s' % rev
return (tag, rev, date, hsh)
@deprecated('getversion_svn_setuptools')
def getversion_svn(path=None):
"""Get version info for a Subversion checkout.
@param path: directory of the Subversion checkout
@return:
- tag (name for the repository),
- rev (current Subversion revision identifier),
- date (date of current revision),
- hash (git hash for the Subversion revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
_program_dir = path or _get_program_dir()
tag, rev, date = svn_rev_info(_program_dir)
hsh, date2 = github_svn_rev2hash(tag, rev)
if date.tm_isdst >= 0 and date2.tm_isdst >= 0:
assert date == date2, 'Date of version is not consistent'
# date.tm_isdst is -1 means unknown state
# compare its contents except daylight saving time status
else:
for i in range(date.n_fields - 1):
assert date[i] == date2[i], 'Date of version is not consistent'
rev = 's%s' % rev
if (not date or not tag or not rev) and not path:
raise ParseError
return (tag, rev, date, hsh)
def getversion_git(path=None):
"""Get version info for a Git clone.
@param path: directory of the Git checkout
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
_program_dir = path or _get_program_dir()
cmd = 'git'
try:
subprocess.Popen([cmd], stdout=subprocess.PIPE).communicate()
except OSError:
# some windows git versions provide git.cmd instead of git.exe
cmd = 'git.cmd'
with open(os.path.join(_program_dir, '.git/config'), 'r') as f:
tag = f.read()
# Try 'origin' and then 'gerrit' as remote name; bail if can't find either.
remote_pos = tag.find('[remote "origin"]')
if remote_pos == -1:
remote_pos = tag.find('[remote "gerrit"]')
if remote_pos == -1:
tag = '?'
else:
s = tag.find('url = ', remote_pos)
e = tag.find('\n', s)
tag = tag[(s + 6):e]
t = tag.strip().split('/')
tag = '[%s] %s' % (t[0][:-1], '-'.join(t[3:]))
with subprocess.Popen([cmd, '--no-pager',
'log', '-1',
'--pretty=format:"%ad|%an|%h|%H|%d"'
'--abbrev-commit',
'--date=iso'],
cwd=_program_dir,
stdout=subprocess.PIPE).stdout as stdout:
info = stdout.read()
info = info.decode(config.console_encoding).split('|')
date = info[0][:-6]
date = time.strptime(date.strip('"'), '%Y-%m-%d %H:%M:%S')
with subprocess.Popen([cmd, 'rev-list', 'HEAD'],
cwd=_program_dir,
stdout=subprocess.PIPE).stdout as stdout:
rev = stdout.read()
rev = 'g%s' % len(rev.splitlines())
hsh = info[3] # also stored in '.git/refs/heads/master'
if (not date or not tag or not rev) and not path:
raise ParseError
return (tag, rev, date, hsh)
def getversion_nightly(path=None):
"""Get version info for a nightly release.
@param path: directory of the uncompressed nightly.
@return:
- tag (name for the repository),
- rev (current revision identifier),
- date (date of current revision),
- hash (git hash for the current revision)
@rtype: C{tuple} of three C{str} and a C{time.struct_time}
"""
if not path:
path = _get_program_dir()
with open(os.path.join(path, 'version')) as data:
(tag, rev, date, hsh) = data.readlines()
date = time.strptime(date[:19], '%Y-%m-%dT%H:%M:%S')
if not date or not tag or not rev:
raise ParseError
return (tag, rev, date, hsh)
def getversion_package(path=None): # pylint: disable=unused-argument
"""Get version info for an installed package.
@param path: Unused argument
@return:
- tag: 'pywikibot/__init__.py'
- rev: '-1 (unknown)'
- date (date the package was installed locally),
- hash (git hash for the current revision of 'pywikibot/__init__.py')
@rtype: C{tuple} of four C{str}
"""
hsh = get_module_version(pywikibot)
date = get_module_mtime(pywikibot).timetuple()
tag = 'pywikibot/__init__.py'
rev = '-1 (unknown)'
return (tag, rev, date, hsh)
def getversion_onlinerepo(repo=None):
"""Retrieve current framework revision number from online repository.
@param repo: (optional) Online repository location
@type repo: URL or string
"""
from pywikibot.comms import http
url = repo or 'https://git.wikimedia.org/feed/pywikibot/core'
buf = http.fetch(uri=url,
headers={'user-agent': '{pwb}'}).content.splitlines()
try:
hsh = buf[13].split('/')[5][:-1]
return hsh
except Exception as e:
raise ParseError(repr(e) + ' while parsing ' + repr(buf))
@deprecated('get_module_version, get_module_filename and get_module_mtime')
def getfileversion(filename):
"""Retrieve revision number of file.
Extracts __version__ variable containing Id tag, without importing it.
(thus can be done for any file)
The version variable containing the Id tag is read and
returned. Because it doesn't import it, the version can
be retrieved from any file.
@param filename: Name of the file to get version
@type filename: string
"""
_program_dir = _get_program_dir()
__version__ = None
mtime = None
fn = os.path.join(_program_dir, filename)
if os.path.exists(fn):
with codecs.open(fn, 'r', "utf-8") as f:
for line in f.readlines():
if line.find('__version__') == 0:
try:
exec(line)
except:
pass
break
stat = os.stat(fn)
mtime = datetime.datetime.fromtimestamp(stat.st_mtime).isoformat(' ')
if mtime and __version__:
return u'%s %s %s' % (filename, __version__[5:-1][:7], mtime)
else:
return None
def get_module_version(module):
"""
Retrieve __version__ variable from an imported module.
@param module: The module instance.
@type module: module
@return: The version hash without the surrounding text. If not present None.
@rtype: str or None
"""
if hasattr(module, '__version__'):
return module.__version__[5:-1]
def get_module_filename(module):
"""
Retrieve filename from an imported pywikibot module.
It uses the __file__ attribute of the module. If its file extension ends
with py and another character, the last character is discarded when the py
file exists.
@param module: The module instance.
@type module: module
@return: The filename if it's a pywikibot module otherwise None.
@rtype: str or None
"""
if hasattr(module, '__file__') and os.path.exists(module.__file__):
filename = module.__file__
if filename[-4:-1] == '.py' and os.path.exists(filename[:-1]):
filename = filename[:-1]
program_dir = _get_program_dir()
if filename[:len(program_dir)] == program_dir:
return filename
def get_module_mtime(module):
"""
Retrieve the modification time from an imported module.
@param module: The module instance.
@type module: module
@return: The modification time if it's a pywikibot module otherwise None.
@rtype: datetime or None
"""
filename = get_module_filename(module)
if filename:
return datetime.datetime.fromtimestamp(os.stat(filename).st_mtime)
def package_versions(modules=None, builtins=False, standard_lib=None):
"""Retrieve package version information.
When builtins or standard_lib are None, they will be included only
if a version was found in the package.
@param modules: Modules to inspect
@type modules: list of strings
@param builtins: Include builtins
@type builtins: Boolean, or None for automatic selection
@param standard_lib: Include standard library packages
@type standard_lib: Boolean, or None for automatic selection
"""
if not modules:
modules = sys.modules.keys()
std_lib_dir = get_python_lib(standard_lib=True)
root_packages = set([key.split('.')[0]
for key in modules])
builtin_packages = set([name.split('.')[0] for name in root_packages
if name in sys.builtin_module_names or
'_' + name in sys.builtin_module_names])
# Improve performance by removing builtins from the list if possible.
if builtins is False:
root_packages = list(root_packages - builtin_packages)
std_lib_packages = []
paths = {}
data = {}
for name in root_packages:
try:
package = __import__(name, level=0)
except Exception as e:
data[name] = {'name': name, 'err': e}
continue
info = {'package': package, 'name': name}
if name in builtin_packages:
info['type'] = 'builtins'
if '__file__' in package.__dict__:
# Determine if this file part is of the standard library.
if os.path.normcase(package.__file__).startswith(
os.path.normcase(std_lib_dir)):
std_lib_packages.append(name)
if standard_lib is False:
continue
info['type'] = 'standard library'
# Strip '__init__.py' from the filename.
path = package.__file__
if '__init__.py' in path:
path = path[0:path.index('__init__.py')]
if PY2:
path = path.decode(sys.getfilesystemencoding())
info['path'] = path
assert path not in paths, 'Path of the package is in defined paths'
paths[path] = name
if '__version__' in package.__dict__:
info['ver'] = package.__version__
elif name.startswith('unicodedata'):
info['ver'] = package.unidata_version
# If builtins or standard_lib is None,
# only include package if a version was found.
if (builtins is None and name in builtin_packages) or \
(standard_lib is None and name in std_lib_packages):
if 'ver' in info:
data[name] = info
else:
# Remove the entry from paths, so it isn't processed below
del paths[info['path']]
else:
data[name] = info
# Remove any pywikibot sub-modules which were loaded as a package.
# e.g. 'wikipedia_family.py' is loaded as 'wikipedia'
_program_dir = _get_program_dir()
for path, name in paths.items():
if _program_dir in path:
del data[name]
return data
| mit |
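A hedged sketch of consuming the version helpers above; it assumes a working pywikibot checkout or install (importing pywikibot normally expects a user config), and 'requests' is just an example module name.

from pywikibot import version

info = version.getversiondict()   # keys: tag, rev, date, hsh
print('running %(tag)s rev %(rev)s from %(date)s' % info)

# Report version info for third-party and stdlib modules used by a script:
for name, data in version.package_versions(['requests', 'os']).items():
    print(name, data.get('ver', '(no version found)'))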
RedMike/pYendor | lib/fov.py | 1 | 11735 | # Author: Aaron MacDonald
# Date: June 14, 2007
#
# Description: An implementation of the precise permissive field
# of view algorithm for use in tile-based games.
# Based on the algorithm presented at
# http://roguebasin.roguelikedevelopment.org/
# index.php?title=
# Precise_Permissive_Field_of_View.
#
# You are free to use or modify this code as long as this notice is
# included.
# This code is released without warranty.
#
# ---
#
# FovMap is not included under this notice, as it is pYendor-specific.
import copy
def fieldOfView(startX, startY, mapWidth, mapHeight, radius,
funcVisitTile, funcTileBlocked):
"""
Determines which coordinates on a 2D grid are visible from a
particular coordinate.
startX, startY: The (x, y) coordinate on the grid that
is the centre of view.
mapWidth, mapHeight: The maximum extents of the grid. The
minimum extents are assumed to be both
zero.
radius: How far the field of view may extend
in either direction along the x and y
axis.
funcVisitTile: User function that takes two integers
representing an (x, y) coordinate. Is
used to "visit" visible coordinates.
funcTileBlocked: User function that takes two integers
representing an (x, y) coordinate.
Returns True if the coordinate blocks
sight to coordinates "behind" it.
"""
visited = set() # Keep track of what tiles have been visited so
# that no tile will be visited twice.
# Will always see the centre.
funcVisitTile(startX, startY)
visited.add((startX, startY))
# Get the dimensions of the actual field of view, making
# sure not to go off the map or beyond the radius.
if startX < radius:
minExtentX = startX
else:
minExtentX = radius
if mapWidth - startX - 1 < radius:
maxExtentX = mapWidth - startX - 1
else:
maxExtentX = radius
if startY < radius:
minExtentY = startY
else:
minExtentY = radius
if mapHeight - startY - 1 < radius:
maxExtentY = mapHeight - startY - 1
else:
maxExtentY = radius
# Northeast quadrant
__checkQuadrant(visited, startX, startY, 1, 1,
maxExtentX, maxExtentY,
funcVisitTile, funcTileBlocked)
# Southeast quadrant
__checkQuadrant(visited, startX, startY, 1, -1,
maxExtentX, minExtentY,
funcVisitTile, funcTileBlocked)
# Southwest quadrant
__checkQuadrant(visited, startX, startY, -1, -1,
minExtentX, minExtentY,
funcVisitTile, funcTileBlocked)
# Northwest quadrant
__checkQuadrant(visited, startX, startY, -1, 1,
minExtentX, maxExtentY,
funcVisitTile, funcTileBlocked)
#-------------------------------------------------------------
class __Line(object):
def __init__(self, xi, yi, xf, yf):
self.xi = xi
self.yi = yi
self.xf = xf
self.yf = yf
dx = property(fget = lambda self: self.xf - self.xi)
dy = property(fget = lambda self: self.yf - self.yi)
def pBelow(self, x, y):
return self.relativeSlope(x, y) > 0
def pBelowOrCollinear(self, x, y):
return self.relativeSlope(x, y) >= 0
def pAbove(self, x, y):
return self.relativeSlope(x, y) < 0
def pAboveOrCollinear(self, x, y):
return self.relativeSlope(x, y) <= 0
def pCollinear(self, x, y):
return self.relativeSlope(x, y) == 0
def lineCollinear(self, line):
return self.pCollinear(line.xi, line.yi)\
and self.pCollinear(line.xf, line.yf)
def relativeSlope(self, x, y):
return (self.dy * (self.xf - x))\
- (self.dx * (self.yf - y))
class __ViewBump:
def __init__(self, x, y, parent):
self.x = x
self.y = y
self.parent = parent
class __View:
def __init__(self, shallowLine, steepLine):
self.shallowLine = shallowLine
self.steepLine = steepLine
self.shallowBump = None
self.steepBump = None
def __checkQuadrant(visited, startX, startY, dx, dy,
extentX, extentY, funcVisitTile, funcTileBlocked):
activeViews = []
shallowLine = __Line(0, 1, extentX, 0)
steepLine = __Line(1, 0, 0, extentY)
activeViews.append( __View(shallowLine, steepLine) )
viewIndex = 0
# Visit the tiles diagonally and going outwards
#
# .
# .
# . .
# 9 .
# 5 8 .
# 2 4 7
# @ 1 3 6 . . .
maxI = extentX + extentY
i = 1
while i != maxI + 1 and len(activeViews) > 0:
if 0 > i - extentX:
startJ = 0
else:
startJ = i - extentX
if i < extentY:
maxJ = i
else:
maxJ = extentY
j = startJ
while j != maxJ + 1 and viewIndex < len(activeViews):
x = i - j
y = j
__visitCoord(visited, startX, startY, x, y, dx, dy,
viewIndex, activeViews,
funcVisitTile, funcTileBlocked)
j += 1
i += 1
def __visitCoord(visited, startX, startY, x, y, dx, dy, viewIndex,
activeViews, funcVisitTile, funcTileBlocked):
# The top left and bottom right corners of the current coordinate.
topLeft = (x, y + 1)
bottomRight = (x + 1, y)
while viewIndex < len(activeViews)\
and activeViews[viewIndex].steepLine.pBelowOrCollinear(
bottomRight[0], bottomRight[1]):
# The current coordinate is above the current view and is
# ignored. The steeper fields may need it though.
viewIndex += 1
if viewIndex == len(activeViews)\
or activeViews[viewIndex].shallowLine.pAboveOrCollinear(
topLeft[0], topLeft[1]):
# Either the current coordinate is above all of the fields
# or it is below all of the fields.
return
# It is now known that the current coordinate is between the steep
# and shallow lines of the current view.
isBlocked = False
# The real quadrant coordinates
realX = x * dx
realY = y * dy
if (startX + realX, startY + realY) not in visited:
visited.add((startX + realX, startY + realY))
funcVisitTile(startX + realX, startY + realY)
# else:
# # Debugging
# print (startX + realX, startY + realY)
isBlocked = funcTileBlocked(startX + realX, startY + realY)
if not isBlocked:
# The current coordinate does not block sight and therefore
# has no effect on the view.
return
if activeViews[viewIndex].shallowLine.pAbove(
bottomRight[0], bottomRight[1])\
and activeViews[viewIndex].steepLine.pBelow(
topLeft[0], topLeft[1]):
# The current coordinate is intersected by both lines in the
# current view. The view is completely blocked.
del activeViews[viewIndex]
elif activeViews[viewIndex].shallowLine.pAbove(
bottomRight[0], bottomRight[1]):
# The current coordinate is intersected by the shallow line of
# the current view. The shallow line needs to be raised.
__addShallowBump(topLeft[0], topLeft[1],
activeViews, viewIndex)
__checkView(activeViews, viewIndex)
elif activeViews[viewIndex].steepLine.pBelow(
topLeft[0], topLeft[1]):
# The current coordinate is intersected by the steep line of
# the current view. The steep line needs to be lowered.
__addSteepBump(bottomRight[0], bottomRight[1], activeViews,
viewIndex)
__checkView(activeViews, viewIndex)
else:
# The current coordinate is completely between the two lines
# of the current view. Split the current view into two views
# above and below the current coordinate.
shallowViewIndex = viewIndex
viewIndex += 1
steepViewIndex = viewIndex
activeViews.insert(shallowViewIndex,
copy.deepcopy(activeViews[shallowViewIndex]))
__addSteepBump(bottomRight[0], bottomRight[1],
activeViews, shallowViewIndex)
if not __checkView(activeViews, shallowViewIndex):
viewIndex -= 1
steepViewIndex -= 1
__addShallowBump(topLeft[0], topLeft[1], activeViews,
steepViewIndex)
__checkView(activeViews, steepViewIndex)
def __addShallowBump(x, y, activeViews, viewIndex):
activeViews[viewIndex].shallowLine.xf = x
activeViews[viewIndex].shallowLine.yf = y
activeViews[viewIndex].shallowBump = __ViewBump(x, y,
activeViews[viewIndex].shallowBump)
curBump = activeViews[viewIndex].steepBump
while curBump is not None:
if activeViews[viewIndex].shallowLine.pAbove(
curBump.x, curBump.y):
activeViews[viewIndex].shallowLine.xi = curBump.x
activeViews[viewIndex].shallowLine.yi = curBump.y
curBump = curBump.parent
def __addSteepBump(x, y, activeViews, viewIndex):
activeViews[viewIndex].steepLine.xf = x
activeViews[viewIndex].steepLine.yf = y
activeViews[viewIndex].steepBump = __ViewBump(x, y,
activeViews[viewIndex].steepBump)
curBump = activeViews[viewIndex].shallowBump
while curBump is not None:
if activeViews[viewIndex].steepLine.pBelow(
curBump.x, curBump.y):
activeViews[viewIndex].steepLine.xi = curBump.x
activeViews[viewIndex].steepLine.yi = curBump.y
curBump = curBump.parent
def __checkView(activeViews, viewIndex):
"""
Removes the view in activeViews at index viewIndex if
- The two lines are collinear
- The lines pass through either extremity
"""
shallowLine = activeViews[viewIndex].shallowLine
steepLine = activeViews[viewIndex].steepLine
if shallowLine.lineCollinear(steepLine)\
and ( shallowLine.pCollinear(0, 1)
or shallowLine.pCollinear(1, 0) ):
del activeViews[viewIndex]
return False
else:
return True
class FovMap(object):
def __init__(self, w, h):
self.w = w
self.h = h
self.map = [
[0 for i in range(self.h)] for j in range(self.w)
]
self.clear_light()
def clear_light(self):
self.light_map = [
[0 for i in range(self.h)] for j in range(self.w)
]
def get_explored(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
return self.map[x][y]
return 0
def set_explored(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
self.map[x][y] = 1
def set_lit(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
self.light_map[x][y] = 1
self.set_explored(x, y)
def get_lit(self, x, y):
if 0 < x < self.w and 0 < y < self.h:
return self.light_map[x][y]
return 0 | bsd-2-clause |
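A hedged sketch of wiring fieldOfView to the FovMap class above on a small hand-made grid, assuming both are in scope (same module). The callbacks close over the grid and the map; '#' cells block sight.

grid = [
    "..........",
    "....#.....",
    "....#.....",
    "..........",
]
height, width = len(grid), len(grid[0])
fov = FovMap(width, height)

def visit(x, y):
    fov.set_lit(x, y)              # mark the tile as currently visible

def blocked(x, y):
    return grid[y][x] == '#'       # walls stop line of sight

fov.clear_light()
fieldOfView(2, 2, width, height, 6, visit, blocked)

# Overlay the lit tiles on the grid ('*' = visible this turn).
for y in range(height):
    print(''.join('*' if fov.get_lit(x, y) else grid[y][x] for x in range(width)))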
TakayukiSakai/tensorflow | tensorflow/python/ops/gradients.py | 2 | 28479 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import contextlib
import warnings
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_grad # pylint: disable=unused-import
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import constant_op
from tensorflow.python.ops import control_flow_grad # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import image_grad # pylint: disable=unused-import
from tensorflow.python.ops import logging_ops # pylint: disable=unused-import
from tensorflow.python.ops import linalg_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_grad # pylint: disable=unused-import
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.platform import tf_logging as logging
# Warn the user if we convert a sparse representation to dense with at
# least this number of elements.
_LARGE_SPARSE_NUM_ELEMENTS = 100000000
def _IndexedSlicesToTensor(value, dtype=None, name=None, as_ref=False):
"""Converts an IndexedSlices object `value` to a Tensor.
NOTE(mrry): This function is potentially expensive.
Args:
value: An ops.IndexedSlices object.
dtype: The dtype of the Tensor to be returned.
name: Optional name to use for the returned Tensor.
as_ref: True if a ref is requested.
Returns:
A dense Tensor representing the values in the given IndexedSlices.
Raises:
ValueError: If the IndexedSlices does not have the same dtype.
"""
_ = as_ref
if dtype and not dtype.is_compatible_with(value.dtype):
raise ValueError(
"Tensor conversion requested dtype %s for IndexedSlices with dtype %s" %
(dtype.name, value.dtype.name))
if value.dense_shape is None:
raise ValueError(
"Tensor conversion requested for IndexedSlices without dense_shape: %s"
% str(value))
# TODO(mrry): Consider adding static shape information to
# IndexedSlices, to avoid using numpy here.
dense_shape_value = tensor_util.constant_value(value.dense_shape)
if dense_shape_value is not None:
num_elements = np.prod(dense_shape_value)
if num_elements >= _LARGE_SPARSE_NUM_ELEMENTS:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor with %d elements. "
"This may consume a large amount of memory." % num_elements)
else:
warnings.warn(
"Converting sparse IndexedSlices to a dense Tensor of unknown shape. "
"This may consume a large amount of memory.")
return math_ops.unsorted_segment_sum(value.values,
value.indices,
value.dense_shape[0],
name=name)
ops.register_tensor_conversion_function(ops.IndexedSlices,
_IndexedSlicesToTensor)
def _MarkReachedOps(from_ops, reached_ops):
"""Mark all ops reached from "from_ops".
Args:
from_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
"""
queue = collections.deque()
queue.extend(from_ops)
while queue:
op = queue.popleft()
if not reached_ops[op._id]:
reached_ops[op._id] = True
for output in op.outputs:
queue.extend(output.consumers())
def _GatherInputs(to_ops, reached_ops):
"""List all inputs of to_ops that are in reached_ops.
Args:
to_ops: list of Operations.
reached_ops: list of booleans, indexed by operation id.
Returns:
The list of all inputs of to_ops that are in reached_ops.
That list includes all elements of to_ops.
"""
inputs = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
inputs.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
return inputs
def _PendingCount(graph, to_ops, from_ops):
"""Initialize the pending count for ops between two lists of Operations.
'pending_count[op._id]' indicates the number of backprop inputs
to this operation.
Args:
graph: a Graph.
to_ops: list of Operations.
from_ops: list of Operations.
Returns:
A tuple containing: (1) a list of integers indexed by operation id,
indicating the number of backprop inputs to this operation, and (2)
a ControlFlowState object which is not None if the ops between from_ops
and to_ops contain control flow loops.
"""
# Mark reachable ops from from_ops.
reached_ops = [False] * (graph._last_id + 1)
for op in to_ops:
reached_ops[op._id] = True
_MarkReachedOps(from_ops, reached_ops)
# Mark between ops.
between_ops = [False] * (graph._last_id + 1)
between_op_list = []
queue = collections.deque()
queue.extend(to_ops)
while queue:
op = queue.popleft()
# We are interested in this op.
if reached_ops[op._id]:
between_ops[op._id] = True
between_op_list.append(op)
# Clear the boolean so we won't add the inputs again.
reached_ops[op._id] = False
for inp in op.inputs:
queue.append(inp.op)
# 'loop_state' is None if there are no while loops.
loop_state = control_flow_ops.MaybeCreateControlFlowState(between_op_list,
between_ops)
# Initialize pending count for between ops.
pending_count = [0] * (graph._last_id + 1)
for op in between_op_list:
for x in op.inputs:
if between_ops[x.op._id]:
pending_count[x.op._id] += 1
for x in op.control_inputs:
if between_ops[x._id]:
pending_count[x._id] += 1
return pending_count, loop_state
def _AsList(x):
return x if isinstance(x, (list, tuple)) else [x]
def _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops):
"""Fill in default values for grad_ys.
Args:
grad_ys: List of gradients, can contain None.
ys: List of tensors.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
Returns:
A list of gradients to use, without None.
Raises:
ValueError: If one of the grad_ys is invalid.
"""
if len(grad_ys) != len(ys):
raise ValueError("Passed %d grad_ys for %d ys" % (len(grad_ys), len(ys)))
grad_ys = ops.convert_n_to_tensor_or_indexed_slices(grad_ys, name="grad_y")
for i in xrange(len(grad_ys)):
grad_y = grad_ys[i]
y = ys[i]
if grad_y is None:
with _maybe_colocate_with(y.op, colocate_gradients_with_ops):
grad_ys[i] = array_ops.fill(
array_ops.shape(y),
constant_op.constant(1, dtype=y.dtype))
else:
if grad_y.dtype != y.dtype:
raise ValueError("Y and ys_grad must be of the same type, "
"not y: %s, ys_grad: %s " %
(dtypes.as_dtype(y.dtype).name,
dtypes.as_dtype(grad_y.dtype).name))
return grad_ys
def _IsFloat(tensor):
dtype = dtypes.as_dtype(tensor.dtype)
return dtype.base_dtype in (dtypes.float32, dtypes.float64)
def _VerifyGeneratedGradients(grads, op):
"""Verify that gradients are valid in number and type.
Args:
grads: List of generated gradients.
op: Operation for which the gradients where generated.
Raises:
ValueError: if the gradients are invalid.
"""
if len(grads) != len(op.inputs):
raise ValueError("Num gradients %d generated for op %s do not match num "
"inputs %d" % (len(grads), op.node_def, len(op.inputs)))
for i in xrange(len(grads)):
grad = grads[i]
inp = op.inputs[i]
if grad is not None:
if not grad.dtype.is_compatible_with(inp.dtype):
raise ValueError("Gradient type %s generated for op %s does "
"not match input type %s" %
(dtypes.as_dtype(grad.dtype).name, op.node_def,
dtypes.as_dtype(inp.dtype).name))
def _StopOps(from_ops, pending_count):
"""The set of ops that terminate the gradient computation.
This computes the frontier of the forward graph *before* which backprop
should stop. Operations in the returned set will not be differentiated.
This set is defined as the subset of `from_ops` containing ops that have
no predecessor in `from_ops`. `pending_count` is the result of
`_PendingCount(g, xs, from_ops)`. An 'op' has predecessors in `from_ops`
iff pending_count[op._id] > 0.
Args:
from_ops: list of Operations.
pending_count: List of integers, indexed by operation id.
Returns:
The set of operations.
"""
stop_ops = set()
for op in from_ops:
is_stop_op = True
for inp in op.inputs:
if pending_count[inp.op._id] > 0:
is_stop_op = False
break
if is_stop_op:
stop_ops.add(op._id)
return stop_ops
@contextlib.contextmanager
def _maybe_colocate_with(op, colocate_gradients_with_ops):
"""Context to colocate with `op` if `colocate_gradients_with_ops`."""
if colocate_gradients_with_ops:
with ops.colocate_with(op):
yield
else:
yield
def gradients(ys,
xs,
grad_ys=None,
name="gradients",
colocate_gradients_with_ops=False,
gate_gradients=False,
aggregation_method=None):
"""Constructs symbolic partial derivatives of sum of `ys` w.r.t. x in `xs`.
`ys` and `xs` are each a `Tensor` or a list of tensors. `grad_ys`
is a list of `Tensor`, holding the gradients received by the
`ys`. The list must be the same length as `ys`.
`gradients()` adds ops to the graph to output the partial
derivatives of `ys` with respect to `xs`. It returns a list of
`Tensor` of length `len(xs)` where each tensor is the `sum(dy/dx)`
for y in `ys`.
`grad_ys` is a list of tensors of the same length as `ys` that holds
the initial gradients for each y in `ys`. When `grad_ys` is None,
we fill in a tensor of '1's of the shape of y for each y in `ys`. A
user can provide their own initial `grad_ys` to compute the
derivatives using a different initial gradient for each y (e.g., if
one wanted to weight the gradient differently for each value in
each y).
Args:
ys: A `Tensor` or list of tensors to be differentiated.
xs: A `Tensor` or list of tensors to be used for differentiation.
grad_ys: Optional. A `Tensor` or list of tensors the same size as
`ys` and holding the gradients computed for each y in `ys`.
name: Optional name to use for grouping all the gradient ops together.
defaults to 'gradients'.
colocate_gradients_with_ops: If True, try colocating gradients with
the corresponding op.
gate_gradients: If True, add a tuple around the gradients returned
for an operations. This avoids some race conditions.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
A list of `sum(dy/dx)` for each x in `xs`.
Raises:
LookupError: if one of the operations between `x` and `y` does not
have a registered gradient function.
ValueError: if the arguments are invalid.
"""
ys = _AsList(ys)
xs = _AsList(xs)
if grad_ys is None:
grad_ys = [None] * len(ys)
else:
grad_ys = _AsList(grad_ys)
with ops.op_scope(ys + xs + grad_ys, name, "gradients"):
ys = ops.convert_n_to_tensor_or_indexed_slices(ys, name="y")
xs = ops.convert_n_to_tensor_or_indexed_slices(xs, name="x")
grad_ys = _DefaultGradYs(grad_ys, ys, colocate_gradients_with_ops)
# The approach we take here is as follows: Create a list of all ops in the
# subgraph between the ys and xs. Visit these ops in reverse order of ids
# to ensure that when we visit an op the gradients w.r.t its outputs have
# been collected. Then aggregate these gradients if needed, call the op's
# gradient function, and add the generated gradients to the gradients for
# its input.
# Initialize the pending count for ops in the connected subgraph from ys
# to the xs.
to_ops = [t.op for t in ys]
from_ops = [t.op for t in xs]
pending_count, loop_state = _PendingCount(ops.get_default_graph(),
to_ops, from_ops)
# Iterate over the collected ops.
#
# grads: op => list of gradients received on each output endpoint of the
# op. The gradients for each endpoint are initially collected as a list.
# When it is time to call the op's gradient function, for each endpoint we
# aggregate the list of received gradients into a Add() Operation if there
# is more than one.
grads = {}
# Add the initial gradients for the ys.
for y, grad_y in zip(ys, grad_ys):
_SetGrad(grads, y, grad_y)
# Initialize queue with to_ops.
queue = collections.deque()
# Add the ops in 'to_ops' into the queue.
to_ops_set = set()
for op in to_ops:
# 'ready' handles the case where one output gradient relies on
# another output's gradient.
# pylint: disable=protected-access
ready = (pending_count[op._id] == 0)
if ready and op._id not in to_ops_set:
to_ops_set.add(op._id)
queue.append(op)
if loop_state:
# The "unused" exits of the loops are added to ys. As an example,
# people often write:
# v1, _ = While(p, b, [x1, x2])
# result = gradients(v1, x1)
# The exit node of x2 is not included by the betweenness analysis.
# But we need it if x2 is involved in computing v1. So we add it
# back in backprop with a zeros_like gradient.
loop_exits = loop_state.GetAllLoopExits()
for y in loop_exits:
if pending_count[y.op._id] == 0 and y.op._id not in to_ops_set:
if _IsFloat(y):
# Floating-point outputs get a zero gradient.
_SetGrad(grads, y, loop_state.ZerosLikeForExit(y))
queue.append(y.op)
# The set of 'from_ops'.
stop_ops = _StopOps(from_ops, pending_count)
while queue:
# generate gradient subgraph for op.
op = queue.popleft()
with _maybe_colocate_with(op, colocate_gradients_with_ops):
if loop_state:
loop_state.EnterGradWhileContext(op, before=True)
out_grads = _AggregatedGrads(grads, op, loop_state, aggregation_method)
if loop_state:
loop_state.ExitGradWhileContext(op, before=True)
grad_fn = None
# pylint: disable=protected-access
is_func_call = ops.get_default_graph()._is_function(op.type)
if not is_func_call and any(
isinstance(g, ops.Tensor) or g for g in out_grads) and (
op._id not in stop_ops):
# pylint: enable=protected-access
# A grad_fn must be defined, either as a function or as None
# for ops that do not have gradients.
try:
grad_fn = ops.get_gradient_function(op)
except LookupError:
raise LookupError(
"No gradient defined for operation '%s' (op type: %s)" %
(op.name, op.type))
if loop_state:
loop_state.EnterGradWhileContext(op, before=False)
if (grad_fn or is_func_call) and any(
isinstance(g, ops.Tensor) or g for g in out_grads):
# NOTE: If _AggregatedGrads didn't compute a value for the i'th
# output, it means that the cost does not depend on output[i],
# therefore dC/doutput[i] is 0.
for i, out_grad in enumerate(out_grads):
if (not isinstance(out_grad, ops.Tensor)
and not out_grad) and _IsFloat(op.outputs[i]):
# Only floating-point outputs get a zero gradient. Gradient
# functions should ignore the gradient for other outputs.
if loop_state:
out_grads[i] = loop_state.ZerosLike(op, i)
else:
out_grads[i] = control_flow_ops.ZerosLikeOutsideLoop(op, i)
with ops.name_scope(op.name + "_grad"):
# pylint: disable=protected-access
with ops.get_default_graph()._original_op(op):
# pylint: enable=protected-access
if is_func_call:
# For function call ops, we add a 'SymbolicGradient'
# node to the graph to compute gradients.
f_in = [x for x in op.inputs] + out_grads
f_types = [x.dtype for x in op.inputs]
# pylint: disable=protected-access
in_grads = _AsList(functional_ops._symbolic_gradient(
f_in, f_types, op.type))
# pylint: enable=protected-access
else:
in_grads = _AsList(grad_fn(op, *out_grads))
_VerifyGeneratedGradients(in_grads, op)
if gate_gradients and len(
[x for x in in_grads if x is not None]) > 1:
in_grads = control_flow_ops.tuple(in_grads)
_LogOpGradients(op, out_grads, in_grads)
else:
# If no grad_fn is defined or none of out_grads is available,
          # just propagate a list of None backwards.
in_grads = [None] * len(op.inputs)
for t_in, in_grad in zip(op.inputs, in_grads):
if in_grad is not None:
if isinstance(in_grad, ops.Tensor):
in_grad.set_shape(t_in.get_shape())
_SetGrad(grads, t_in, in_grad)
if loop_state:
loop_state.ExitGradWhileContext(op, before=False)
# update pending count for the inputs of op and enqueue ready ops.
# pylint: disable=protected-access
for x in op.inputs:
pending_count[x.op._id] -= 1
ready = (pending_count[x.op._id] == 0)
if loop_state and not ready:
ready = (pending_count[x.op._id] > 0 and
control_flow_ops.IsLoopSwitch(x.op))
if ready:
queue.append(x.op)
for x in op.control_inputs:
pending_count[x._id] -= 1
        if pending_count[x._id] == 0:
queue.append(x)
# pylint: enable=protected-access
if loop_state:
loop_state.PostProcessing()
return [_GetGrad(grads, x) for x in xs]
def _SetGrad(grads, t, grad):
"""Sets gradient "grad" in "grads" for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
op_grads = [[] for _ in xrange(len(op.outputs))]
grads[op] = op_grads
t_grads = op_grads[t.value_index]
if isinstance(t_grads, list):
t_grads.append(grad)
else:
assert control_flow_ops.IsLoopSwitch(op)
op_grads[t.value_index] = grad
def _GetGrad(grads, t):
"""Gets gradient for tensor "t"."""
op = t.op
op_grads = grads.get(op)
if not op_grads:
return None
t_grad = op_grads[t.value_index]
assert not isinstance(t_grad, list), (
"gradients list should have been aggregated by now.")
return t_grad
def _GetGrads(grads, op):
"""Gets all gradients for op."""
if op in grads:
return grads[op]
else:
return [[] for _ in xrange(len(op.outputs))]
def _HandleNestedIndexedSlices(grad):
assert isinstance(grad, ops.IndexedSlices)
if isinstance(grad.values, ops.Tensor):
return grad
else:
assert isinstance(grad.values, ops.IndexedSlices)
g = _HandleNestedIndexedSlices(grad.values)
return ops.IndexedSlices(
g.values, array_ops.gather(grad.indices, g.indices), g.dense_shape)
def _AccumulatorShape(inputs):
shape = tensor_shape.unknown_shape()
for i in inputs:
if isinstance(i, ops.Tensor):
shape = shape.merge_with(i.get_shape())
return shape
def _LogOpGradients(op, out_grads, in_grads):
"""Log the in and out grads of an op."""
logging.vlog(1, "Gradient for '" + op.name + "'")
def _FilterGrad(x):
if x is None:
return False
if isinstance(x, (list, tuple)):
return bool(x)
else:
return True
logging.vlog(1, " in --> %s",
", ".join([x.name for x in out_grads if _FilterGrad(x)]))
logging.vlog(1, " out --> %s",
", ".join([x.name for x in in_grads if _FilterGrad(x)]))
class AggregationMethod(object):
"""A class listing aggregation methods used to combine gradients.
Computing partial derivatives can require aggregating gradient
contributions. This class lists the various methods that can
be used to combine gradients in the graph:
* `ADD_N`: All of the gradient terms are summed as part of one
operation using the "AddN" op. It has the property that all
gradients must be ready before any aggregation is performed.
* `DEFAULT`: The system-chosen default aggregation method.
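  As an illustrative sketch, one of these constants is simply passed through
  the `aggregation_method` argument of `gradients()`:
    grads = gradients(ys, xs,
                      aggregation_method=AggregationMethod.EXPERIMENTAL_TREE)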
"""
ADD_N = 0
DEFAULT = ADD_N
# The following are experimental and may not be supported in future releases.
EXPERIMENTAL_TREE = 1
EXPERIMENTAL_ACCUMULATE_N = 2
def _AggregatedGrads(grads, op, loop_state, aggregation_method=None):
"""Get the aggregated gradients for op.
Args:
grads: The map of memoized gradients.
op: The op to get gradients for.
loop_state: An object for maintaining the state of the while loops in the
graph. It is of type ControlFlowState. None if the graph
contains no while loops.
aggregation_method: Specifies the method used to combine gradient terms.
Accepted values are constants defined in the class `AggregationMethod`.
Returns:
    A list of gradients, one per output of `op`. If the gradients
    for a particular output are a list, this function aggregates them
before returning.
Raises:
TypeError: if the incoming grads are not Tensors or IndexedSlices.
ValueError: if the arguments are invalid.
"""
if aggregation_method is None:
aggregation_method = AggregationMethod.DEFAULT
if aggregation_method not in [AggregationMethod.ADD_N,
AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N]:
raise ValueError(
"Invalid aggregation_method specified %s." % aggregation_method)
out_grads = _GetGrads(grads, op)
for i, out_grad in enumerate(out_grads):
if loop_state:
if isinstance(out_grad, (ops.Tensor, ops.IndexedSlices)):
assert control_flow_ops.IsLoopSwitch(op)
continue
# Grads have to be Tensors or IndexedSlices
if (out_grad is None or
not all([isinstance(g, (ops.Tensor, ops.IndexedSlices))
for g in out_grad if g is not None])):
raise TypeError("gradients have to be either all Tensors "
"or all IndexedSlices")
# Aggregate multiple gradients, and convert [] to None.
if out_grad:
if len(out_grad) < 2:
used = "nop"
out_grads[i] = out_grad[0]
elif all([isinstance(g, ops.Tensor) for g in out_grad if g is not None]):
tensor_shape = _AccumulatorShape(out_grad)
if (aggregation_method == AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
and len(out_grad) > 2 and tensor_shape.is_fully_defined()):
# The benefit of using AccumulateN is that its inputs can be combined
# in any order and this can allow the expression to be evaluated with
# a smaller memory footprint. When used with gpu_allocator_retry,
# it is possible to compute a sum of terms which are much larger than
# total GPU memory.
# AccumulateN can currently only be used if we know the shape for
# an accumulator variable. If this is not known, or if we only have
# 2 grads then we fall through to the "tree" case below.
used = "accumulate_n"
out_grads[i] = math_ops.accumulate_n(out_grad)
elif aggregation_method in [AggregationMethod.EXPERIMENTAL_TREE,
AggregationMethod.EXPERIMENTAL_ACCUMULATE_N
]:
# Aggregate all gradients by doing pairwise sums: this may
# reduce performance, but it can improve memory because the
# gradients can be released earlier.
#
# TODO(vrv): Consider replacing this with a version of
# tf.AddN() that eagerly frees its inputs as soon as they are
# ready, so the order of this tree does not become a problem.
used = "tree"
with ops.name_scope(op.name + "_gradient_sum"):
running_sum = out_grad[0]
for grad in out_grad[1:]:
running_sum = math_ops.add_n([running_sum, grad])
out_grads[i] = running_sum
else:
used = "add_n"
out_grads[i] = math_ops.add_n(out_grad)
logging.vlog(2, " _AggregatedGrads %d x %s using %s", len(out_grad),
tensor_shape, used)
else:
out_grad = math_ops._as_indexed_slices_list([g for g in out_grad
if g is not None])
out_grad = [_HandleNestedIndexedSlices(x) for x in out_grad]
# Form IndexedSlices out of the concatenated values and
# indices.
out_grads[i] = ops.IndexedSlices(
array_ops.concat(0, [x.values for x in out_grad]),
array_ops.concat(0, [x.indices
for x in out_grad]), out_grad[0].dense_shape)
else:
out_grads[i] = []
return out_grads
# TODO(vrv): Make this available when we want to make it public.
def _hessian_vector_product(ys, xs, v):
"""Multiply the Hessian of `ys` wrt `xs` by `v`.
This is an efficient construction that uses a backprop-like approach
to compute the product between the Hessian and another vector. The
Hessian is usually too large to be explicitly computed or even
represented, but this method allows us to at least multiply by it
for the same big-O cost as backprop.
Implicit Hessian-vector products are the main practical, scalable way
of using second derivatives with neural networks. They allow us to
do things like construct Krylov subspaces and approximate conjugate
gradient descent.
Example: if `y` = 1/2 `x`^T A `x`, then `hessian_vector_product(y,
x, v)` will return an expression that evaluates to the same values
as (A + A.T) `v`.
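  A typical call looks like the following sketch (the names are hypothetical,
  and this helper is module-private rather than part of the public API):
    hvp = _hessian_vector_product(loss, [w1, w2], [v1, v2])
  where `loss` is a scalar tensor, `w1`/`w2` are the tensors to differentiate
  with respect to, and `v1`/`v2` have the same shapes as `w1`/`w2`.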
Args:
ys: A scalar value, or a tensor or list of tensors to be summed to
yield a scalar.
xs: A list of tensors that we should construct the Hessian over.
v: A list of tensors, with the same shapes as xs, that we want to
multiply by the Hessian.
Returns:
A list of tensors (or if the list would be length 1, a single tensor)
containing the product between the Hessian and `v`.
Raises:
    ValueError: if `xs` and `v` have different lengths.
"""
# Validate the input
length = len(xs)
if len(v) != length:
raise ValueError("xs and v must have the same length.")
# First backprop
grads = gradients(ys, xs)
assert len(grads) == length
elemwise_products = [math_ops.mul(grad_elem, array_ops.stop_gradient(v_elem))
for grad_elem, v_elem in zip(grads, v)
if grad_elem is not None]
# Second backprop
return gradients(elemwise_products, xs)
| apache-2.0 |
tersmitten/ansible | contrib/inventory/rackhd.py | 52 | 3044 | #!/usr/bin/env python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import json
import os
import requests
import argparse
RACKHD_URL = 'http://localhost:8080'
class RackhdInventory(object):
def __init__(self, nodeids):
self._inventory = {}
for nodeid in nodeids:
self._load_inventory_data(nodeid)
inventory = {}
for (nodeid, info) in self._inventory.items():
inventory[nodeid] = (self._format_output(nodeid, info))
print(json.dumps(inventory))
def _load_inventory_data(self, nodeid):
info = {}
info['ohai'] = RACKHD_URL + '/api/common/nodes/{0}/catalogs/ohai'.format(nodeid)
info['lookup'] = RACKHD_URL + '/api/common/lookups/?q={0}'.format(nodeid)
results = {}
for (key, url) in info.items():
r = requests.get(url, verify=False)
results[key] = r.text
self._inventory[nodeid] = results
def _format_output(self, nodeid, info):
        output = {}
        try:
node_info = json.loads(info['lookup'])
ipaddress = ''
if len(node_info) > 0:
ipaddress = node_info[0]['ipAddress']
output = {'hosts': [ipaddress], 'vars': {}}
for (key, result) in info.items():
output['vars'][key] = json.loads(result)
output['vars']['ansible_ssh_user'] = 'monorail'
except KeyError:
pass
return output
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--host')
parser.add_argument('--list', action='store_true')
return parser.parse_args()
try:
# check if rackhd url(ie:10.1.1.45:8080) is specified in the environment
RACKHD_URL = 'http://' + str(os.environ['RACKHD_URL'])
except Exception:
# use default values
pass
# Use the nodeid specified in the environment to limit the data returned
# or return data for all available nodes
nodeids = []
if (parse_args().host):
try:
nodeids += parse_args().host.split(',')
RackhdInventory(nodeids)
except Exception:
pass
if (parse_args().list):
try:
url = RACKHD_URL + '/api/common/nodes'
r = requests.get(url, verify=False)
data = json.loads(r.text)
for entry in data:
if entry['type'] == 'compute':
nodeids.append(entry['id'])
RackhdInventory(nodeids)
except Exception:
pass
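# Illustrative invocations (the URL and node ids are placeholders):
#   RACKHD_URL=10.1.1.45:8080 ./rackhd.py --list
#   ./rackhd.py --host <nodeid>[,<nodeid>,...]
# or, as a dynamic Ansible inventory: ansible -i rackhd.py all -m ping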
| gpl-3.0 |
lightblu/ansible-modules-extras | system/selinux_permissive.py | 88 | 4067 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Michael Scherer <[email protected]>
# inspired by code of github.com/dandiker/
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: selinux_permissive
short_description: Change permissive domain in SELinux policy
description:
- Add and remove domain from the list of permissive domain.
version_added: "2.0"
options:
domain:
description:
- "the domain that will be added or removed from the list of permissive domains"
required: true
permissive:
description:
- "indicate if the domain should or should not be set as permissive"
required: true
choices: [ 'True', 'False' ]
no_reload:
description:
- "automatically reload the policy after a change"
- "default is set to 'false' as that's what most people would want after changing one domain"
- "Note that this doesn't work on older version of the library (example EL 6), the module will silently ignore it in this case"
required: false
default: False
choices: [ 'True', 'False' ]
store:
description:
- "name of the SELinux policy store to use"
required: false
default: null
notes:
- Requires a version of SELinux recent enough ( ie EL 6 or newer )
requirements: [ policycoreutils-python ]
author: Michael Scherer <[email protected]>
'''
EXAMPLES = '''
- selinux_permissive: name=httpd_t permissive=true
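# An illustrative variation (values are placeholders): switch permissive mode
# back off without triggering an automatic policy reload
- selinux_permissive: name=httpd_t permissive=false no_reload=true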
'''
HAVE_SEOBJECT = False
try:
import seobject
HAVE_SEOBJECT = True
except ImportError:
pass
def main():
module = AnsibleModule(
argument_spec=dict(
domain=dict(aliases=['name'], required=True),
store=dict(required=False, default=''),
permissive=dict(type='bool', required=True),
no_reload=dict(type='bool', required=False, default=False),
),
supports_check_mode=True
)
# global vars
changed = False
store = module.params['store']
permissive = module.params['permissive']
domain = module.params['domain']
no_reload = module.params['no_reload']
if not HAVE_SEOBJECT:
module.fail_json(changed=False, msg="policycoreutils-python required for this module")
try:
permissive_domains = seobject.permissiveRecords(store)
except ValueError, e:
module.fail_json(domain=domain, msg=str(e))
# not supported on EL 6
if 'set_reload' in dir(permissive_domains):
permissive_domains.set_reload(not no_reload)
try:
all_domains = permissive_domains.get_all()
except ValueError, e:
module.fail_json(domain=domain, msg=str(e))
if permissive:
if domain not in all_domains:
if not module.check_mode:
try:
permissive_domains.add(domain)
except ValueError, e:
module.fail_json(domain=domain, msg=str(e))
changed = True
else:
if domain in all_domains:
if not module.check_mode:
try:
permissive_domains.delete(domain)
except ValueError, e:
module.fail_json(domain=domain, msg=str(e))
changed = True
module.exit_json(changed=changed, store=store,
permissive=permissive, domain=domain)
#################################################
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
onyxfish/journalism | agate/aggregations/percentiles.py | 4 | 2233 | #!/usr/bin/env python
import math
from agate.aggregations.base import Aggregation
from agate.aggregations.has_nulls import HasNulls
from agate.data_types import Number
from agate.exceptions import DataTypeError
from agate.utils import Quantiles
from agate.warns import warn_null_calculation
class Percentiles(Aggregation):
"""
Divide a column into 100 equal-size groups using the "CDF" method.
See `this explanation <http://www.amstat.org/publications/jse/v14n3/langford.html>`_
of the various methods for computing percentiles.
"Zeroth" (min value) and "Hundredth" (max value) percentiles are included
for reference and intuitive indexing.
A reference implementation was provided by
`pycalcstats <https://code.google.com/p/pycalcstats/>`_.
This aggregation can not be applied to a :class:`.TableSet`.
:param column_name:
The name of a column containing :class:`.Number` data.
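    A sketch of typical usage (assumes a :class:`.Table` instance named
    ``table`` with a numeric ``'salary'`` column; both names are placeholders):
        percentiles = table.aggregate(Percentiles('salary'))
        median = percentiles[50]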
"""
def __init__(self, column_name):
self._column_name = column_name
def validate(self, table):
column = table.columns[self._column_name]
if not isinstance(column.data_type, Number):
raise DataTypeError('Percentiles can only be applied to columns containing Number data.')
has_nulls = HasNulls(self._column_name).run(table)
if has_nulls:
warn_null_calculation(self, column)
def run(self, table):
"""
:returns:
An instance of :class:`Quantiles`.
"""
column = table.columns[self._column_name]
data = column.values_without_nulls_sorted()
# Zeroth percentile is first datum
quantiles = [data[0]]
for percentile in range(1, 100):
k = len(data) * (float(percentile) / 100)
low = max(1, int(math.ceil(k)))
high = min(len(data), int(math.floor(k + 1)))
# No remainder
if low == high:
value = data[low - 1]
# Remainder
else:
value = (data[low - 1] + data[high - 1]) / 2
quantiles.append(value)
# Hundredth percentile is final datum
quantiles.append(data[-1])
return Quantiles(quantiles)
| mit |
Vijaysai005/KProject | vijay/DBSCAN/clustering/db/generate_data.py | 1 | 5801 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on Thu Jul 20 13:15:05 2017
@author: Vijayasai S
"""
# Use python3
from haversine import distance
from datetime import datetime
from dateutil import tz
import my_dbscan as mydb
import alert_update as au
from pymongo import MongoClient
import pandas as pd
import time
from_zone = tz.tzutc()
to_zone = tz.tzlocal()
def _connect_mongo(host, port, username, password, db):
""" A util for making a connection to mongo """
if username and password:
mongo_uri = 'mongodb://%s:%s@%s:%s/%s' % (username, password, host, port, db)
conn = MongoClient(mongo_uri)
else:
conn = MongoClient(host, port)
return conn[db]
def read_mongo(db, collection, query={}, host='localhost', port=27017, username=None, password=None, no_id=True):
""" Read from Mongo and Store into DataFrame """
# Connect to MongoDB
db = _connect_mongo(host=host, port=port, username=username, password=password, db=db)
# Make a query to the specific DB and Collection
cursor = db[collection].find(query)
# Expand the cursor and construct the DataFrame
df = pd.DataFrame(list(cursor))
# Delete the _id
if no_id:
del df['_id']
return df
def Generate_data(get_col, set_col1, set_col2, time_delay, year, month, startday, endday, starthr, endhr, startmin, endmin):
id_dist = [] ; item_id_dist = []
main_curr_rank = {} ; tot_rank_curr = {}
count = 0
client = MongoClient('localhost', 27017)
db = client.maximus_db
for day in range(startday,endday+1):
for hr in range(starthr,endhr+1):
for mins in range(startmin,endmin+1,time_delay):
try:
#set_col1.drop()
#set_col2.drop()
mins_next = mins + time_delay
hr_next = hr
if time_delay + mins > 59:
mins_next = (time_delay + mins) - 60
hr_next += 1
if hr_next > 23:
hr_next = 0
day += 1
#print (hr,mins)
items = get_col.find({"$and" :[{"packettimestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"packettimestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}]},{"unit_id":1,"latitude":1,"longitude":1,"_id":0}).sort([("packettimestamp", -1)])
utc = datetime(year,month,day,hr,mins)
utc = utc.replace(tzinfo=from_zone)
# Convert time zone
ist = utc.astimezone(to_zone)
data = [] ; item_id = []
for item in items:
if item["unit_id"] not in item_id:
item_id.append(item["unit_id"])
data.append(item)
if item["unit_id"] not in item_id_dist:
item_id_dist.append(item["unit_id"])
id_dist.append(item)
u_id = [ids["unit_id"] for ids in id_dist]
if count > 0:
rank_curr = {} ; lat_curr = {} ; long_curr = {}
for item in item_id:
if item in u_id:
for i in range(len(id_dist)):
if item == id_dist[i]["unit_id"]:
for j in range(len(data)):
if item == data[j]["unit_id"]:
dist = distance(id_dist[i]["latitude"],data[j]["latitude"],id_dist[i]["longitude"],data[j]["longitude"])
id_dist[i]["latitude"] = data[j]["latitude"]
id_dist[i]["longitude"] = data[j]["longitude"]
rank_curr[item] = dist
lat_curr[item] = id_dist[i]["latitude"]
long_curr[item] = id_dist[i]["longitude"]
try:
tot_rank_curr[item] = dist + main_curr_rank[item]
main_curr_rank[item] = dist + main_curr_rank[item]
except Exception:
tot_rank_curr[item] = dist
main_curr_rank[item] = dist
#print (item, dist)
rank_current_sorted = sorted(rank_curr.values(), reverse=True)
tot_rank_current_sorted = sorted(tot_rank_curr.values(), reverse=True)
#rank,r_id,dist_rank = [],[],[]
for item in item_id:
if rank_curr[item] in rank_current_sorted:
set_col1.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":rank_curr[item], "unit_id":item, "rank":rank_current_sorted.index(rank_curr[item])+1,"timestamp":ist}])
set_col2.insert([{"latitude":lat_curr[item], "longitude":long_curr[item], "distance_by_interval":tot_rank_curr[item], "unit_id":item, "rank":tot_rank_current_sorted.index(tot_rank_curr[item])+1,"timestamp":ist}])
##########################################################################
# CREATING CLUSTERS AND SAVING IT IN DATABASE #
##########################################################################
table_to_read_1 = "tapola_rank_15_total"
eps = 5.0 # in KM
ride_id = None
coll_1 = db.tapola_rank_15_manual_clustering
df_1 = read_mongo("maximus_db", table_to_read_1, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
mydb.manual_DBSCAN(df_1, coll_1, eps)
print (ist)
print ("Creating cluster using manual dbscan algorithm")
##########################################################################
# CREATING ALERTS AND SAVING IT IN DATABASE #
##########################################################################
table_to_read_2 = "tapola_rank_15_manual_clustering"
df_2 = read_mongo("maximus_db", table_to_read_2, {"$and" :[{"timestamp":{"$gte":datetime(year,month,day,hr,mins,0)}},{"timestamp":{"$lte":datetime(year,month,day,hr_next,mins_next,0)}}], "ride_id":ride_id})
coll_2 = db.tapola_rank_15_manual_clus_alert
au.Generate_alert(df_2, coll_2)
print ("Generating alert and saving in the database\n")
time.sleep(1)
count += 1
except KeyError:
pass
return
| gpl-3.0 |
dalegregory/odoo | addons/website_blog/__init__.py | 373 | 1036 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import controllers
import models
import wizard
| agpl-3.0 |
google-research/google-research | darc/train_eval.py | 1 | 15970 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Main training script. See README.md for usage instructions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import os
import time
from absl import app
from absl import flags
from absl import logging
import classifiers
import darc_agent
import darc_envs
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
from tf_agents.agents.ddpg import critic_network
from tf_agents.agents.sac import tanh_normal_projection_network
from tf_agents.drivers import dynamic_step_driver
from tf_agents.eval import metric_utils
from tf_agents.metrics import tf_metrics
from tf_agents.networks import actor_distribution_network
from tf_agents.policies import greedy_policy
from tf_agents.policies import random_tf_policy
from tf_agents.replay_buffers import tf_uniform_replay_buffer
from tf_agents.utils import common
flags.DEFINE_string(
"root_dir",
None,
"Root directory for writing logs/summaries/checkpoints.",
)
flags.DEFINE_multi_string("gin_file", None, "Path to the trainer config files.")
flags.DEFINE_multi_string("gin_bindings", None, "Gin binding to pass through.")
FLAGS = flags.FLAGS
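# Illustrative invocation (flag values are placeholders; see README.md for the
# authoritative usage):
#   python train_eval.py --root_dir=/tmp/darc \
#     --gin_bindings='train_eval.environment_name="broken_reacher"'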
@gin.configurable
def train_eval(
root_dir,
environment_name="broken_reacher",
num_iterations=1000000,
actor_fc_layers=(256, 256),
critic_obs_fc_layers=None,
critic_action_fc_layers=None,
critic_joint_fc_layers=(256, 256),
initial_collect_steps=10000,
real_initial_collect_steps=10000,
collect_steps_per_iteration=1,
real_collect_interval=10,
replay_buffer_capacity=1000000,
# Params for target update
target_update_tau=0.005,
target_update_period=1,
# Params for train
train_steps_per_iteration=1,
batch_size=256,
actor_learning_rate=3e-4,
critic_learning_rate=3e-4,
classifier_learning_rate=3e-4,
alpha_learning_rate=3e-4,
td_errors_loss_fn=tf.math.squared_difference,
gamma=0.99,
reward_scale_factor=0.1,
gradient_clipping=None,
use_tf_functions=True,
# Params for eval
num_eval_episodes=30,
eval_interval=10000,
# Params for summaries and logging
train_checkpoint_interval=10000,
policy_checkpoint_interval=5000,
rb_checkpoint_interval=50000,
log_interval=1000,
summary_interval=1000,
summaries_flush_secs=10,
debug_summaries=True,
summarize_grads_and_vars=False,
train_on_real=False,
delta_r_warmup=0,
random_seed=0,
checkpoint_dir=None,
):
"""A simple train and eval for SAC."""
np.random.seed(random_seed)
tf.random.set_seed(random_seed)
root_dir = os.path.expanduser(root_dir)
train_dir = os.path.join(root_dir, "train")
eval_dir = os.path.join(root_dir, "eval")
train_summary_writer = tf.compat.v2.summary.create_file_writer(
train_dir, flush_millis=summaries_flush_secs * 1000)
train_summary_writer.set_as_default()
eval_summary_writer = tf.compat.v2.summary.create_file_writer(
eval_dir, flush_millis=summaries_flush_secs * 1000)
if environment_name == "broken_reacher":
get_env_fn = darc_envs.get_broken_reacher_env
elif environment_name == "half_cheetah_obstacle":
get_env_fn = darc_envs.get_half_cheetah_direction_env
elif environment_name.startswith("broken_joint"):
base_name = environment_name.split("broken_joint_")[1]
get_env_fn = functools.partial(
darc_envs.get_broken_joint_env, env_name=base_name)
elif environment_name.startswith("falling"):
base_name = environment_name.split("falling_")[1]
get_env_fn = functools.partial(
darc_envs.get_falling_env, env_name=base_name)
else:
raise NotImplementedError("Unknown environment: %s" % environment_name)
eval_name_list = ["sim", "real"]
eval_env_list = [get_env_fn(mode) for mode in eval_name_list]
eval_metrics_list = []
for name in eval_name_list:
eval_metrics_list.append([
tf_metrics.AverageReturnMetric(
buffer_size=num_eval_episodes, name="AverageReturn_%s" % name),
])
global_step = tf.compat.v1.train.get_or_create_global_step()
with tf.compat.v2.summary.record_if(
lambda: tf.math.equal(global_step % summary_interval, 0)):
tf_env_real = get_env_fn("real")
if train_on_real:
tf_env = get_env_fn("real")
else:
tf_env = get_env_fn("sim")
time_step_spec = tf_env.time_step_spec()
observation_spec = time_step_spec.observation
action_spec = tf_env.action_spec()
actor_net = actor_distribution_network.ActorDistributionNetwork(
observation_spec,
action_spec,
fc_layer_params=actor_fc_layers,
continuous_projection_net=(
tanh_normal_projection_network.TanhNormalProjectionNetwork),
)
critic_net = critic_network.CriticNetwork(
(observation_spec, action_spec),
observation_fc_layer_params=critic_obs_fc_layers,
action_fc_layer_params=critic_action_fc_layers,
joint_fc_layer_params=critic_joint_fc_layers,
kernel_initializer="glorot_uniform",
last_kernel_initializer="glorot_uniform",
)
classifier = classifiers.build_classifier(observation_spec, action_spec)
tf_agent = darc_agent.DarcAgent(
time_step_spec,
action_spec,
actor_network=actor_net,
critic_network=critic_net,
classifier=classifier,
actor_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=actor_learning_rate),
critic_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=critic_learning_rate),
classifier_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=classifier_learning_rate),
alpha_optimizer=tf.compat.v1.train.AdamOptimizer(
learning_rate=alpha_learning_rate),
target_update_tau=target_update_tau,
target_update_period=target_update_period,
td_errors_loss_fn=td_errors_loss_fn,
gamma=gamma,
reward_scale_factor=reward_scale_factor,
gradient_clipping=gradient_clipping,
debug_summaries=debug_summaries,
summarize_grads_and_vars=summarize_grads_and_vars,
train_step_counter=global_step,
)
tf_agent.initialize()
# Make the replay buffer.
replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=1,
max_length=replay_buffer_capacity,
)
replay_observer = [replay_buffer.add_batch]
real_replay_buffer = tf_uniform_replay_buffer.TFUniformReplayBuffer(
data_spec=tf_agent.collect_data_spec,
batch_size=1,
max_length=replay_buffer_capacity,
)
real_replay_observer = [real_replay_buffer.add_batch]
sim_train_metrics = [
tf_metrics.NumberOfEpisodes(name="NumberOfEpisodesSim"),
tf_metrics.EnvironmentSteps(name="EnvironmentStepsSim"),
tf_metrics.AverageReturnMetric(
buffer_size=num_eval_episodes,
batch_size=tf_env.batch_size,
name="AverageReturnSim",
),
tf_metrics.AverageEpisodeLengthMetric(
buffer_size=num_eval_episodes,
batch_size=tf_env.batch_size,
name="AverageEpisodeLengthSim",
),
]
real_train_metrics = [
tf_metrics.NumberOfEpisodes(name="NumberOfEpisodesReal"),
tf_metrics.EnvironmentSteps(name="EnvironmentStepsReal"),
tf_metrics.AverageReturnMetric(
buffer_size=num_eval_episodes,
batch_size=tf_env.batch_size,
name="AverageReturnReal",
),
tf_metrics.AverageEpisodeLengthMetric(
buffer_size=num_eval_episodes,
batch_size=tf_env.batch_size,
name="AverageEpisodeLengthReal",
),
]
eval_policy = greedy_policy.GreedyPolicy(tf_agent.policy)
initial_collect_policy = random_tf_policy.RandomTFPolicy(
tf_env.time_step_spec(), tf_env.action_spec())
collect_policy = tf_agent.collect_policy
train_checkpointer = common.Checkpointer(
ckpt_dir=train_dir,
agent=tf_agent,
global_step=global_step,
metrics=metric_utils.MetricsGroup(
sim_train_metrics + real_train_metrics, "train_metrics"),
)
policy_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, "policy"),
policy=eval_policy,
global_step=global_step,
)
rb_checkpointer = common.Checkpointer(
ckpt_dir=os.path.join(train_dir, "replay_buffer"),
max_to_keep=1,
replay_buffer=(replay_buffer, real_replay_buffer),
)
if checkpoint_dir is not None:
checkpoint_path = tf.train.latest_checkpoint(checkpoint_dir)
assert checkpoint_path is not None
train_checkpointer._load_status = train_checkpointer._checkpoint.restore( # pylint: disable=protected-access
checkpoint_path)
train_checkpointer._load_status.initialize_or_restore() # pylint: disable=protected-access
else:
train_checkpointer.initialize_or_restore()
rb_checkpointer.initialize_or_restore()
if replay_buffer.num_frames() == 0:
initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env,
initial_collect_policy,
observers=replay_observer + sim_train_metrics,
num_steps=initial_collect_steps,
)
real_initial_collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env_real,
initial_collect_policy,
observers=real_replay_observer + real_train_metrics,
num_steps=real_initial_collect_steps,
)
collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env,
collect_policy,
observers=replay_observer + sim_train_metrics,
num_steps=collect_steps_per_iteration,
)
real_collect_driver = dynamic_step_driver.DynamicStepDriver(
tf_env_real,
collect_policy,
observers=real_replay_observer + real_train_metrics,
num_steps=collect_steps_per_iteration,
)
config_str = gin.operative_config_str()
logging.info(config_str)
with tf.compat.v1.gfile.Open(os.path.join(root_dir, "operative.gin"),
"w") as f:
f.write(config_str)
if use_tf_functions:
initial_collect_driver.run = common.function(initial_collect_driver.run)
real_initial_collect_driver.run = common.function(
real_initial_collect_driver.run)
collect_driver.run = common.function(collect_driver.run)
real_collect_driver.run = common.function(real_collect_driver.run)
tf_agent.train = common.function(tf_agent.train)
# Collect initial replay data.
if replay_buffer.num_frames() == 0:
logging.info(
"Initializing replay buffer by collecting experience for %d steps with "
"a random policy.",
initial_collect_steps,
)
initial_collect_driver.run()
real_initial_collect_driver.run()
for eval_name, eval_env, eval_metrics in zip(eval_name_list, eval_env_list,
eval_metrics_list):
metric_utils.eager_compute(
eval_metrics,
eval_env,
eval_policy,
num_episodes=num_eval_episodes,
train_step=global_step,
summary_writer=eval_summary_writer,
summary_prefix="Metrics-%s" % eval_name,
)
metric_utils.log_metrics(eval_metrics)
time_step = None
real_time_step = None
policy_state = collect_policy.get_initial_state(tf_env.batch_size)
timed_at_step = global_step.numpy()
time_acc = 0
# Prepare replay buffer as dataset with invalid transitions filtered.
def _filter_invalid_transition(trajectories, unused_arg1):
return ~trajectories.is_boundary()[0]
dataset = (
replay_buffer.as_dataset(
sample_batch_size=batch_size, num_steps=2).unbatch().filter(
_filter_invalid_transition).batch(batch_size).prefetch(5))
real_dataset = (
real_replay_buffer.as_dataset(
sample_batch_size=batch_size, num_steps=2).unbatch().filter(
_filter_invalid_transition).batch(batch_size).prefetch(5))
# Dataset generates trajectories with shape [Bx2x...]
iterator = iter(dataset)
real_iterator = iter(real_dataset)
def train_step():
experience, _ = next(iterator)
real_experience, _ = next(real_iterator)
return tf_agent.train(experience, real_experience=real_experience)
if use_tf_functions:
train_step = common.function(train_step)
for _ in range(num_iterations):
start_time = time.time()
time_step, policy_state = collect_driver.run(
time_step=time_step,
policy_state=policy_state,
)
assert not policy_state # We expect policy_state == ().
if (global_step.numpy() % real_collect_interval == 0 and
global_step.numpy() >= delta_r_warmup):
real_time_step, policy_state = real_collect_driver.run(
time_step=real_time_step,
policy_state=policy_state,
)
for _ in range(train_steps_per_iteration):
train_loss = train_step()
time_acc += time.time() - start_time
global_step_val = global_step.numpy()
if global_step_val % log_interval == 0:
logging.info("step = %d, loss = %f", global_step_val, train_loss.loss)
steps_per_sec = (global_step_val - timed_at_step) / time_acc
logging.info("%.3f steps/sec", steps_per_sec)
tf.compat.v2.summary.scalar(
name="global_steps_per_sec", data=steps_per_sec, step=global_step)
timed_at_step = global_step_val
time_acc = 0
for train_metric in sim_train_metrics:
train_metric.tf_summaries(
train_step=global_step, step_metrics=sim_train_metrics[:2])
for train_metric in real_train_metrics:
train_metric.tf_summaries(
train_step=global_step, step_metrics=real_train_metrics[:2])
if global_step_val % eval_interval == 0:
for eval_name, eval_env, eval_metrics in zip(eval_name_list,
eval_env_list,
eval_metrics_list):
metric_utils.eager_compute(
eval_metrics,
eval_env,
eval_policy,
num_episodes=num_eval_episodes,
train_step=global_step,
summary_writer=eval_summary_writer,
summary_prefix="Metrics-%s" % eval_name,
)
metric_utils.log_metrics(eval_metrics)
if global_step_val % train_checkpoint_interval == 0:
train_checkpointer.save(global_step=global_step_val)
if global_step_val % policy_checkpoint_interval == 0:
policy_checkpointer.save(global_step=global_step_val)
if global_step_val % rb_checkpoint_interval == 0:
rb_checkpointer.save(global_step=global_step_val)
return train_loss
def main(_):
tf.compat.v1.enable_v2_behavior()
logging.set_verbosity(logging.INFO)
gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_bindings)
train_eval(FLAGS.root_dir)
if __name__ == "__main__":
flags.mark_flag_as_required("root_dir")
app.run(main)
| apache-2.0 |
cattleprod/samsung-kernel-gt-i9100 | external/webkit/WebKitTools/pywebsocket/test/test_dispatch.py | 3 | 11046 | #!/usr/bin/env python
#
# Copyright 2009, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests for dispatch module."""
import os
import unittest
import config # This must be imported before mod_pywebsocket.
from mod_pywebsocket import dispatch
import mock
_TEST_HANDLERS_DIR = os.path.join(
os.path.split(__file__)[0], 'testdata', 'handlers')
_TEST_HANDLERS_SUB_DIR = os.path.join(_TEST_HANDLERS_DIR, 'sub')
class DispatcherTest(unittest.TestCase):
def test_normalize_path(self):
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('/a/b'))
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('\\a\\b'))
self.assertEqual(os.path.abspath('/a/b').replace('\\', '/'),
dispatch._normalize_path('/a/c/../b'))
self.assertEqual(os.path.abspath('abc').replace('\\', '/'),
dispatch._normalize_path('abc'))
def test_converter(self):
converter = dispatch._path_to_resource_converter('/a/b')
self.assertEqual('/h', converter('/a/b/h_wsh.py'))
self.assertEqual('/c/h', converter('/a/b/c/h_wsh.py'))
self.assertEqual(None, converter('/a/b/h.py'))
self.assertEqual(None, converter('a/b/h_wsh.py'))
converter = dispatch._path_to_resource_converter('a/b')
self.assertEqual('/h', converter('a/b/h_wsh.py'))
converter = dispatch._path_to_resource_converter('/a/b///')
self.assertEqual('/h', converter('/a/b/h_wsh.py'))
self.assertEqual('/h', converter('/a/b/../b/h_wsh.py'))
converter = dispatch._path_to_resource_converter('/a/../a/b/../b/')
self.assertEqual('/h', converter('/a/b/h_wsh.py'))
converter = dispatch._path_to_resource_converter(r'\a\b')
self.assertEqual('/h', converter(r'\a\b\h_wsh.py'))
self.assertEqual('/h', converter(r'/a/b/h_wsh.py'))
def test_source_file_paths(self):
paths = list(dispatch._source_file_paths(_TEST_HANDLERS_DIR))
paths.sort()
self.assertEqual(7, len(paths))
expected_paths = [
os.path.join(_TEST_HANDLERS_DIR, 'blank_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'origin_check_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'exception_in_transfer_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub', 'non_callable_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub', 'plain_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_handshake_sig_wsh.py'),
os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_transfer_sig_wsh.py'),
]
for expected, actual in zip(expected_paths, paths):
self.assertEqual(expected, actual)
def test_source(self):
self.assertRaises(dispatch.DispatchError, dispatch._source, '')
self.assertRaises(dispatch.DispatchError, dispatch._source, 'def')
self.assertRaises(dispatch.DispatchError, dispatch._source, '1/0')
self.failUnless(dispatch._source(
'def web_socket_do_extra_handshake(request):pass\n'
'def web_socket_transfer_data(request):pass\n'))
def test_source_warnings(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
warnings = dispatcher.source_warnings()
warnings.sort()
expected_warnings = [
(os.path.join(_TEST_HANDLERS_DIR, 'blank_wsh.py') +
': web_socket_do_extra_handshake is not defined.'),
(os.path.join(_TEST_HANDLERS_DIR, 'sub',
'non_callable_wsh.py') +
': web_socket_do_extra_handshake is not callable.'),
(os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_handshake_sig_wsh.py') +
': web_socket_do_extra_handshake is not defined.'),
(os.path.join(_TEST_HANDLERS_DIR, 'sub',
'wrong_transfer_sig_wsh.py') +
': web_socket_transfer_data is not defined.'),
]
self.assertEquals(4, len(warnings))
for expected, actual in zip(expected_warnings, warnings):
self.assertEquals(expected, actual)
def test_do_extra_handshake(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest()
request.ws_resource = '/origin_check'
request.ws_origin = 'http://example.com'
dispatcher.do_extra_handshake(request) # Must not raise exception.
request.ws_origin = 'http://bad.example.com'
self.assertRaises(Exception, dispatcher.do_extra_handshake, request)
def test_transfer_data(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/origin_check'
request.ws_protocol = 'p1'
dispatcher.transfer_data(request)
self.assertEqual('origin_check_wsh.py is called for /origin_check, p1',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/sub/plain'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain, None',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/sub/plain?'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain?, None',
request.connection.written_data())
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/sub/plain?q=v'
request.ws_protocol = None
dispatcher.transfer_data(request)
self.assertEqual('sub/plain_wsh.py is called for /sub/plain?q=v, None',
request.connection.written_data())
def test_transfer_data_no_handler(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
for resource in ['/blank', '/sub/non_callable',
'/sub/no_wsh_at_the_end', '/does/not/exist']:
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = resource
request.ws_protocol = 'p2'
try:
dispatcher.transfer_data(request)
self.fail()
except dispatch.DispatchError, e:
self.failUnless(str(e).find('No handler') != -1)
except Exception:
self.fail()
def test_transfer_data_handler_exception(self):
dispatcher = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
request = mock.MockRequest(connection=mock.MockConn(''))
request.ws_resource = '/sub/exception_in_transfer'
request.ws_protocol = 'p3'
try:
dispatcher.transfer_data(request)
self.fail()
except Exception, e:
self.failUnless(str(e).find('Intentional') != -1)
def test_scan_dir(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
self.assertEqual(3, len(disp._handlers))
self.failUnless(disp._handlers.has_key('/origin_check'))
self.failUnless(disp._handlers.has_key('/sub/exception_in_transfer'))
self.failUnless(disp._handlers.has_key('/sub/plain'))
def test_scan_sub_dir(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, _TEST_HANDLERS_SUB_DIR)
self.assertEqual(2, len(disp._handlers))
self.failIf(disp._handlers.has_key('/origin_check'))
self.failUnless(disp._handlers.has_key('/sub/exception_in_transfer'))
self.failUnless(disp._handlers.has_key('/sub/plain'))
def test_scan_sub_dir_as_root(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_SUB_DIR,
_TEST_HANDLERS_SUB_DIR)
self.assertEqual(2, len(disp._handlers))
self.failIf(disp._handlers.has_key('/origin_check'))
self.failIf(disp._handlers.has_key('/sub/exception_in_transfer'))
self.failIf(disp._handlers.has_key('/sub/plain'))
self.failUnless(disp._handlers.has_key('/exception_in_transfer'))
self.failUnless(disp._handlers.has_key('/plain'))
def test_scan_dir_must_under_root(self):
dispatch.Dispatcher('a/b', 'a/b/c') # OK
dispatch.Dispatcher('a/b///', 'a/b') # OK
self.assertRaises(dispatch.DispatchError,
dispatch.Dispatcher, 'a/b/c', 'a/b')
def test_resource_path_alias(self):
disp = dispatch.Dispatcher(_TEST_HANDLERS_DIR, None)
disp.add_resource_path_alias('/', '/origin_check')
self.assertEqual(4, len(disp._handlers))
self.failUnless(disp._handlers.has_key('/origin_check'))
self.failUnless(disp._handlers.has_key('/sub/exception_in_transfer'))
self.failUnless(disp._handlers.has_key('/sub/plain'))
self.failUnless(disp._handlers.has_key('/'))
self.assertRaises(dispatch.DispatchError,
disp.add_resource_path_alias, '/alias', '/not-exist')
if __name__ == '__main__':
unittest.main()
# vi:sts=4 sw=4 et
| gpl-2.0 |
Nkleppan/CSE-360 | image_space_app/tests/tests.py | 1 | 4528 | import datetime
from django.test.client import Client
from django.utils import timezone
from django.test import TestCase
from signups.models import SignUp, Event
from django.test import LiveServerTestCase
from django.contrib.auth.models import User
from selenium import webdriver
c=Client()
class ImageSpaceTests(TestCase):
def testsignup(self):
response=c.get('/signup/')
    def setUp(self):
        self.user = User.objects.create_user(username='Adam',email='[email protected]',password='password')
        self.user.save()
        self.admin = User.objects.create_superuser('root', '[email protected]', 'testing')
def testauthview(self):
response=c.get('/authview/')
def test1(self):
test1=c.login(username='Adam',password='password')
def testinvalid(self):
response=c.get('/invalid/')
def test2(self):
if(c.login(username='Adam',password='wrongpassword')==False):
test2=True
def test3(self):
if(c.login(username='Wrongname',password='password')==False):
test3=True
def testrootpage(self):
response=c.get('/root/')
def testroot(self):
if(c.login(username='root',password='testing')==True):
testroot=True
class Seleniumtests(LiveServerTestCase):
def setUp(self):
User.objects.create_superuser(username='admin',password='admin',email='[email protected]')
self.selenium = webdriver.Firefox()
self.selenium.implicitly_wait(3)
self.selenium.maximize_window()
super(Seleniumtests, self).setUp()
def tearDown(self):
self.selenium.quit()
super(Seleniumtests, self).tearDown()
def test_root(self):
self.selenium.get('%s%s' % (self.live_server_url, "/admin/"))
username = self.selenium.find_element_by_id("id_username")
username.send_keys("admin")
password = self.selenium.find_element_by_id("id_password")
password.send_keys("admin")
self.selenium.find_element_by_xpath('//input[@value="Log in"]').click()
self.selenium.get('%s%s' % (self.live_server_url, "/admin/auth/user/add/"))
def test_create_Event(self):
self.selenium.get('%s%s' % (self.live_server_url, "/admin/signups/event/add/"))
username = self.selenium.find_element_by_id("id_username")
username.send_keys("admin")
password = self.selenium.find_element_by_id("id_password")
password.send_keys("admin")
self.selenium.find_element_by_xpath('//input[@value="Log in"]').click()
# self.selenium.get('%s%s' % (self.live_server_url, "/admin/signups/event/add/"))
title = self.selenium.find_element_by_name("title")
title.send_keys("Monster Trucks")
eventtype = self.selenium.find_element_by_name("type")
eventtype.send_keys("Motor Sport?")
venue = self.selenium.find_element_by_name("venue")
venue.send_keys("Wild Horse Pass")
thedate = self.selenium.find_element_by_name("date")
thedate.send_keys("2014-12-25")
thetime = self.selenium.find_element_by_name("time")
thetime.send_keys("00:42:42")
num1 = self.selenium.find_element_by_name("current_avail")
num1.send_keys("42")
num2 = self.selenium.find_element_by_name("total_avail")
num2.send_keys("424242")
self.selenium.find_element_by_xpath('//input[@value="Save"]').click()
def test_signup(self):
self.selenium.get('%s%s' % (self.live_server_url, "/"))
self.selenium.get('%s%s' % (self.live_server_url, "/signup/"))
username = self.selenium.find_element_by_name("username")
username.send_keys("test")
password = self.selenium.find_element_by_name("password1")
password.send_keys("test")
password = self.selenium.find_element_by_name("password2")
password.send_keys("test")
email = self.selenium.find_element_by_name("email")
email.send_keys("[email protected]")
self.selenium.find_element_by_xpath('//input[@value="Submit"]').click()
def test_login(self):
self.selenium.get('%s%s' % (self.live_server_url, "/"))
username = self.selenium.find_element_by_name("username")
username.send_keys("test")
password = self.selenium.find_element_by_name("password")
password.send_keys("test")
self.selenium.find_element_by_xpath('//input[@value="Log In"]').click()
| gpl-2.0 |
BlackSmith/GreenTea | lib/gitconfig/config.py | 2 | 1122 | import os
from section import section
from shell import shell
class config(object):
filename = None
def __init__(self, filename):
self.filename = os.path.expanduser(filename)
def __getattr__(self, name):
        return section(self, name)  # __getattr__ is only called when the attribute is not found
@property
def sh(self):
return 'git config --file %s' % self.filename
def get(self, s, k):
try:
return shell(self.sh + " %s.%s" % (s, k))
except:
return None
def set(self, s, k, v):
shell(self.sh + ' %s.%s "%s"' % (s, k, str(v)))
def unset(self, s, k):
try:
shell(self.sh + " --unset %s.%s" % (s, k))
except:
pass
@property
def list(self):
if os.path.exists(self.filename):
return shell(self.sh + " --list").splitlines()
else:
return []
@property
def exists(self):
return os.path.exists(self.filename)
def delete(self):
if self.exists:
os.unlink(self.filename)
def __str__(self):
return self.filename
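# Illustrative usage sketch (assumes git is installed and the file is writable):
#   cfg = config('~/.gitconfig')
#   cfg.set('user', 'name', 'Alice')
#   print(cfg.get('user', 'name'))  # 'Alice'
#   print(cfg.list)                 # every key=value line in the file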
| gpl-2.0 |
alfa-addon/addon | plugin.video.alfa/lib/python_libtorrent/linux_aarch64_ucs2/1.1.0/__init__.py | 362 | 1240 | #-*- coding: utf-8 -*-
'''
python-libtorrent for Kodi (script.module.libtorrent)
Copyright (C) 2015-2016 DiMartino, srg70, RussakHH, aisman
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
'''
| gpl-3.0 |
kalrey/swift | swift/obj/mem_server.py | 6 | 4317 | # Copyright (c) 2010-2013 OpenStack, LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" In-Memory Object Server for Swift """
import os
from swift import gettext_ as _
from eventlet import Timeout
from swift.common.bufferedhttp import http_connect
from swift.common.exceptions import ConnectionTimeout
from swift.common.http import is_success
from swift.obj.mem_diskfile import InMemoryFileSystem
from swift.obj import server
class ObjectController(server.ObjectController):
"""
Implements the WSGI application for the Swift In-Memory Object Server.
"""
def setup(self, conf):
"""
Nothing specific to do for the in-memory version.
:param conf: WSGI configuration parameter
"""
self._filesystem = InMemoryFileSystem()
def get_diskfile(self, device, partition, account, container, obj,
**kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._filesystem.get_diskfile(account, container, obj, **kwargs)
def async_update(self, op, account, container, obj, host, partition,
contdevice, headers_out, objdevice, policy_idx):
"""
Sends or saves an async update.
:param op: operation performed (ex: 'PUT', or 'DELETE')
:param account: account name for the object
:param container: container name for the object
:param obj: object name
:param host: host that the container is on
:param partition: partition that the container is on
:param contdevice: device name that the container is on
:param headers_out: dictionary of headers to send in the container
request
:param objdevice: device name that the object is in
:param policy_idx: the associated storage policy index
"""
headers_out['user-agent'] = 'object-server %s' % os.getpid()
full_path = '/%s/%s/%s' % (account, container, obj)
if all([host, partition, contdevice]):
try:
with ConnectionTimeout(self.conn_timeout):
ip, port = host.rsplit(':', 1)
conn = http_connect(ip, port, contdevice, partition, op,
full_path, headers_out)
with Timeout(self.node_timeout):
response = conn.getresponse()
response.read()
if is_success(response.status):
return
else:
self.logger.error(_(
'ERROR Container update failed: %(status)d '
'response from %(ip)s:%(port)s/%(dev)s'),
{'status': response.status, 'ip': ip, 'port': port,
'dev': contdevice})
except (Exception, Timeout):
self.logger.exception(_(
'ERROR container update failed with '
'%(ip)s:%(port)s/%(dev)s'),
{'ip': ip, 'port': port, 'dev': contdevice})
# FIXME: For now don't handle async updates
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
"""
pass
def app_factory(global_conf, **local_conf):
"""paste.deploy app factory for creating WSGI object server apps"""
conf = global_conf.copy()
conf.update(local_conf)
return ObjectController(conf)
| apache-2.0 |
BT-fgarbely/odoo | addons/base_gengo/wizard/__init__.py | 434 | 1077 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base_gengo_translations
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Etwigg/Examples | Group Project Website/venv/Lib/site-packages/pip/_vendor/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
def __init__(self):
CharSetProber.__init__(self)
self._mCodingSM = [
CodingStateMachine(HZSMModel),
CodingStateMachine(ISO2022CNSMModel),
CodingStateMachine(ISO2022JPSMModel),
CodingStateMachine(ISO2022KRSMModel)
]
self.reset()
def reset(self):
CharSetProber.reset(self)
for codingSM in self._mCodingSM:
if not codingSM:
continue
codingSM.active = True
codingSM.reset()
self._mActiveSM = len(self._mCodingSM)
self._mDetectedCharset = None
def get_charset_name(self):
return self._mDetectedCharset
def get_confidence(self):
if self._mDetectedCharset:
return 0.99
else:
return 0.00
def feed(self, aBuf):
for c in aBuf:
# PY3K: aBuf is a byte array, so c is an int, not a byte
for codingSM in self._mCodingSM:
if not codingSM:
continue
if not codingSM.active:
continue
codingState = codingSM.next_state(wrap_ord(c))
if codingState == constants.eError:
codingSM.active = False
self._mActiveSM -= 1
if self._mActiveSM <= 0:
self._mState = constants.eNotMe
return self.get_state()
elif codingState == constants.eItsMe:
self._mState = constants.eFoundIt
self._mDetectedCharset = codingSM.get_coding_state_machine() # nopep8
return self.get_state()
return self.get_state()
| mit |
rapilabs/django | django/utils/datetime_safe.py | 535 | 2836 | # Python's datetime strftime doesn't handle dates before 1900.
# These classes override date and datetime to support the formatting of a date
# through its full "proleptic Gregorian" date range.
#
# Based on code submitted to comp.lang.python by Andrew Dalke
#
# >>> datetime_safe.date(1850, 8, 2).strftime("%Y/%m/%d was a %A")
# '1850/08/02 was a Friday'
import re
import time as ttime
from datetime import (
date as real_date, datetime as real_datetime, time as real_time,
)
class date(real_date):
def strftime(self, fmt):
return strftime(self, fmt)
class datetime(real_datetime):
def strftime(self, fmt):
return strftime(self, fmt)
@classmethod
def combine(cls, date, time):
return cls(date.year, date.month, date.day,
time.hour, time.minute, time.second,
time.microsecond, time.tzinfo)
def date(self):
return date(self.year, self.month, self.day)
class time(real_time):
pass
def new_date(d):
"Generate a safe date from a datetime.date object."
return date(d.year, d.month, d.day)
def new_datetime(d):
"""
Generate a safe datetime from a datetime.date or datetime.datetime object.
"""
kw = [d.year, d.month, d.day]
if isinstance(d, real_datetime):
kw.extend([d.hour, d.minute, d.second, d.microsecond, d.tzinfo])
return datetime(*kw)
# This library does not support strftime's "%s" or "%y" format strings.
# Allowed if there's an even number of "%"s because they are escaped.
_illegal_formatting = re.compile(r"((^|[^%])(%%)*%[sy])")
def _findall(text, substr):
# Also finds overlaps
sites = []
i = 0
while 1:
j = text.find(substr, i)
if j == -1:
break
sites.append(j)
i = j + 1
return sites
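# For example (illustrative, not from the source): _findall("ababa", "aba")
# returns [0, 2], since overlapping occurrences are kept.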
def strftime(dt, fmt):
if dt.year >= 1900:
return super(type(dt), dt).strftime(fmt)
illegal_formatting = _illegal_formatting.search(fmt)
if illegal_formatting:
raise TypeError("strftime of dates before 1900 does not handle " + illegal_formatting.group(0))
year = dt.year
# For every non-leap year century, advance by
# 6 years to get into the 28-year repeat cycle
delta = 2000 - year
off = 6 * (delta // 100 + delta // 400)
year = year + off
# Move to around the year 2000
year = year + ((2000 - year) // 28) * 28
timetuple = dt.timetuple()
s1 = ttime.strftime(fmt, (year,) + timetuple[1:])
sites1 = _findall(s1, str(year))
s2 = ttime.strftime(fmt, (year + 28,) + timetuple[1:])
sites2 = _findall(s2, str(year + 28))
sites = []
for site in sites1:
if site in sites2:
sites.append(site)
s = s1
syear = "%04d" % (dt.year,)
for site in sites:
s = s[:site] + syear + s[site + 4:]
return s
| bsd-3-clause |
upshot-nutrition/upshot-nutrition.github.io | node_modules/node-gyp/gyp/pylib/gyp/input_test.py | 1841 | 3207 | #!/usr/bin/env python
# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unit tests for the input.py file."""
import gyp.input
import unittest
import sys
class TestFindCycles(unittest.TestCase):
def setUp(self):
self.nodes = {}
for x in ('a', 'b', 'c', 'd', 'e'):
self.nodes[x] = gyp.input.DependencyGraphNode(x)
def _create_dependency(self, dependent, dependency):
dependent.dependencies.append(dependency)
dependency.dependents.append(dependent)
def test_no_cycle_empty_graph(self):
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_line(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_no_cycle_dag(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['a'], self.nodes['c'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
for label, node in self.nodes.iteritems():
self.assertEquals([], node.FindCycles())
def test_cycle_self_reference(self):
self._create_dependency(self.nodes['a'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['a']]],
self.nodes['a'].FindCycles())
def test_cycle_two_nodes(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self.assertEquals([[self.nodes['a'], self.nodes['b'], self.nodes['a']]],
self.nodes['a'].FindCycles())
self.assertEquals([[self.nodes['b'], self.nodes['a'], self.nodes['b']]],
self.nodes['b'].FindCycles())
def test_two_cycles(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['a'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['b'])
cycles = self.nodes['a'].FindCycles()
self.assertTrue(
[self.nodes['a'], self.nodes['b'], self.nodes['a']] in cycles)
self.assertTrue(
[self.nodes['b'], self.nodes['c'], self.nodes['b']] in cycles)
self.assertEquals(2, len(cycles))
def test_big_cycle(self):
self._create_dependency(self.nodes['a'], self.nodes['b'])
self._create_dependency(self.nodes['b'], self.nodes['c'])
self._create_dependency(self.nodes['c'], self.nodes['d'])
self._create_dependency(self.nodes['d'], self.nodes['e'])
self._create_dependency(self.nodes['e'], self.nodes['a'])
self.assertEquals([[self.nodes['a'],
self.nodes['b'],
self.nodes['c'],
self.nodes['d'],
self.nodes['e'],
self.nodes['a']]],
self.nodes['a'].FindCycles())
if __name__ == '__main__':
unittest.main()
| mit |
JonGal/appengine-ml-demo | app/lib/oauth2client/contrib/gce.py | 39 | 5431 | # Copyright 2014 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for Google Compute Engine
Utilities for making it easier to use OAuth 2.0 on Google Compute Engine.
"""
import logging
import warnings
from six.moves import http_client
from oauth2client import client
from oauth2client.contrib import _metadata
logger = logging.getLogger(__name__)
_SCOPES_WARNING = """\
You have requested explicit scopes to be used with a GCE service account.
Using this argument will have no effect on the actual scopes for tokens
requested. These scopes are set at VM instance creation time and
can't be overridden in the request.
"""
class AppAssertionCredentials(client.AssertionCredentials):
"""Credentials object for Compute Engine Assertion Grants
This object will allow a Compute Engine instance to identify itself to
Google and other OAuth 2.0 servers that can verify assertions. It can be
used for the purpose of accessing data stored under an account assigned to
the Compute Engine instance itself.
This credential does not require a flow to instantiate because it
represents a two legged flow, and therefore has all of the required
information to generate and refresh its own access tokens.
Note that :attr:`service_account_email` and :attr:`scopes`
will both return None until the credentials have been refreshed.
To check whether credentials have previously been refreshed use
:attr:`invalid`.
"""
def __init__(self, email=None, *args, **kwargs):
"""Constructor for AppAssertionCredentials
Args:
email: an email that specifies the service account to use.
Only necessary if using custom service accounts
(see https://cloud.google.com/compute/docs/access/create-enable-service-accounts-for-instances#createdefaultserviceaccount).
"""
if 'scopes' in kwargs:
warnings.warn(_SCOPES_WARNING)
kwargs['scopes'] = None
# Assertion type is no longer used, but still in the
# parent class signature.
super(AppAssertionCredentials, self).__init__(None, *args, **kwargs)
self.service_account_email = email
self.scopes = None
self.invalid = True
@classmethod
def from_json(cls, json_data):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def to_json(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def retrieve_scopes(self, http):
"""Retrieves the canonical list of scopes for this access token.
Overrides client.Credentials.retrieve_scopes. Fetches scopes info
from the metadata server.
Args:
http: httplib2.Http, an http object to be used to make the refresh
request.
Returns:
A set of strings containing the canonical list of scopes.
"""
self._retrieve_info(http)
return self.scopes
def _retrieve_info(self, http):
"""Retrieves service account info for invalid credentials.
Args:
http: an object to be used to make HTTP requests.
"""
if self.invalid:
info = _metadata.get_service_account_info(
http,
service_account=self.service_account_email or 'default')
self.invalid = False
self.service_account_email = info['email']
self.scopes = info['scopes']
def _refresh(self, http):
"""Refreshes the access token.
Skip all the storage hoops and just refresh using the API.
Args:
http: an object to be used to make HTTP requests.
Raises:
HttpAccessTokenRefreshError: When the refresh fails.
"""
try:
self._retrieve_info(http)
self.access_token, self.token_expiry = _metadata.get_token(
http, service_account=self.service_account_email)
except http_client.HTTPException as err:
raise client.HttpAccessTokenRefreshError(str(err))
@property
def serialization_data(self):
raise NotImplementedError(
'Cannot serialize credentials for GCE service accounts.')
def create_scoped_required(self):
return False
def sign_blob(self, blob):
"""Cryptographically sign a blob (of bytes).
This method is provided to support a common interface, but
the actual key used for a Google Compute Engine service account
is not available, so it can't be used to sign content.
Args:
blob: bytes, Message to be signed.
Raises:
NotImplementedError, always.
"""
raise NotImplementedError(
'Compute Engine service accounts cannot sign blobs')
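# Illustrative use on a Compute Engine VM (a sketch; httplib2 is only one
# possible transport and is an assumption here, not required by this module):
#   credentials = AppAssertionCredentials()
#   http = credentials.authorize(httplib2.Http())
#   # subsequent requests made through `http` carry the instance's access token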
| apache-2.0 |
Usherwood/usherwood_ds | usherwood_ds/nlp/n_grams/processes.py | 1 | 3607 | #!/usr/bin/env python
"""Functions designed to help n_grams>usherwood_ds run but shouldnt ever need to be called directly by the user."""
import pandas as pd
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
__author__ = "Peter J Usherwood"
__python_version__ = "3.6"
def generate_ngrams(data,
min_gram,
max_gram,
text_field_key='Snippet',
max_features=1000,
tfidf=True,
pos_tuples=False):
"""
The usherwood_ds code for generating the ngrams used by the primary class
:param data: The usherwood_ds pandas dataframe
:param min_gram: Int, The minimum n
:param max_gram: Int, The maximum n
:param text_field_key: The name of the text field (by default Snippet)
:param max_features: Int the maximum number of features to generate
:param tfidf: Bool, whether to use the TF-IDF (rate) vectorizer instead of the default counts one
:param pos_tuples: Bool, if text_field_key is a list of pos_tuples set this to true
:return:
"""
if pos_tuples:
_pos_ngrams = create_pos_ngrams(min_gram, max_gram)
def my_analyzer(tokens):
"""
Custom analyser for the countvectorizer when using pos tuples
:param tokens: List of pos tuples (one record at a time)
:return: List of ngram tokens
"""
tokens = [str(tup) for tup in tokens]
return _pos_ngrams(tokens)
text = data[text_field_key].values.tolist()
if tfidf:
cv = TfidfVectorizer(max_features=max_features, preprocessor=None, analyzer=my_analyzer)
else:
cv = CountVectorizer(max_features=max_features, preprocessor=None, analyzer=my_analyzer)
word_frequency_matrix = cv.fit_transform(raw_documents=text)
else:
text = data[text_field_key]
if tfidf:
cv = TfidfVectorizer(ngram_range=(min_gram, max_gram), max_features=max_features)
else:
cv = CountVectorizer(ngram_range=(min_gram, max_gram), max_features=max_features)
word_frequency_matrix = cv.fit_transform(raw_documents=text.values.astype('U'))
print(word_frequency_matrix.shape)
freqs = [(word, word_frequency_matrix.getcol(idx).sum(), idx) for word, idx in cv.vocabulary_.items()]
ngrams = pd.DataFrame(freqs, columns=['Ngram','Frequency','Index'])
ngrams.sort_values(by=['Frequency'], ascending=False, inplace=True)
ngrams.reset_index(drop=True, inplace=True)
return ngrams, word_frequency_matrix, cv
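# Illustrative call (a sketch; the DataFrame `df` and the 'Snippet' column are
# assumptions, not provided by this module):
#   ngrams, matrix, vectorizer = generate_ngrams(df, min_gram=1, max_gram=2,
#                                                text_field_key='Snippet',
#                                                max_features=500, tfidf=True)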
def create_pos_ngrams(min_gram, max_gram):
"""
Create the custom ngram creator used with the custom analyser for pos tuples
:param min_gram: Int, min gram
:param max_gram: Int, max gram
:return: Custom ngram creator used with the custom analyser for pos tuples
"""
def _pos_ngrams(tokens):
"""
Custom ngram creator used with the custom analyser for pos tuples
:param tokens: List of pos tuples
:return: List of ngram tokens
"""
min_n, max_n = min_gram, max_gram
if max_n != 1:
original_tokens = tokens
tokens = []
n_original_tokens = len(original_tokens)
for n in range(min_n,min(max_n + 1, n_original_tokens + 1)):
for i in range(n_original_tokens - n + 1):
tokens.append(" ".join(original_tokens[i: i + n]))
return tokens
return _pos_ngrams
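# Sketch of the expected behaviour for min_gram=1, max_gram=2 (illustrative):
#   builder = create_pos_ngrams(1, 2)
#   builder(["('dog', 'NN')", "('runs', 'VB')"])
#   -> ["('dog', 'NN')", "('runs', 'VB')", "('dog', 'NN') ('runs', 'VB')"]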
| bsd-2-clause |
SUSE/lrbd | test/test_luns.py | 2 | 3550 |
from lrbd import Luns, Common, Runtime, entries
import unittest, mock
import re, tempfile
import logging
class LunsTestCase(unittest.TestCase):
def setUp(self):
Common.config = {
"iqns": [ "iqn.xyz" ],
"pools": [
{ "pool": "rbd",
"gateways": [
{ "host": "igw1", "tpg": [
{ "image": "archive" }
]
} ]
} ] }
def test_lun(self):
class mock_Luns(Luns):
def _find(self):
pass
def _cmd(self, target, tpg, address):
self.called = " ".join([ target, str(tpg), address ])
self.l = mock_Luns(None)
assert self.l.called == "iqn.xyz 1 archive"
@mock.patch('glob.glob')
def test_find(self, mock_subproc_glob):
mock_subproc_glob.return_value = []
class mock_Luns(Luns):
def _cmd(self, target, tpg, address):
self.called = " ".join([ target, str(tpg), address ])
self.l = mock_Luns(None)
assert self.l.exists == {'iqn.xyz': {}}
@mock.patch('glob.glob')
def test_find_existing(self, mock_subproc_glob):
class mock_Luns(Luns):
def _cmd(self, target, tpg, address):
self.called = " ".join([ target, str(tpg), address ])
with tempfile.NamedTemporaryFile(suffix="._1_1_1_1_1_1_tmp") as tmpfile:
tmpfile.write("/dev/rbd/rbd/archive\n")
tmpfile.flush()
mock_subproc_glob.return_value = [ tmpfile.name ]
self.l = mock_Luns(None)
assert self.l.exists == {'iqn.xyz': {'1': ['archive']}}
def test_cmd_for_rbd(self):
Runtime.config['backstore'] = "rbd"
class mock_Luns(Luns):
def _find(self):
pass
class mock_LunAssignment(object):
def assign(self, target, tpg, image, lun):
pass
def assigned(self, target, image):
pass
logging.disable(logging.DEBUG)
_la = mock_LunAssignment()
self.l = mock_Luns(_la)
print self.l.unassigned
assert self.l.unassigned == [ ['targetcli', '/iscsi/iqn.xyz/tpg1/luns', 'create', '/backstores/rbd/rbd-archive'] ]
@mock.patch('lrbd.Popen')
def test_create_nothing(self, mock_subproc_popen):
mock_subproc_popen.return_value.returncode = 0
class mock_Luns(Luns):
def _find(self):
pass
def _cmd(self, target, tpg, address):
self.called = " ".join([ target, str(tpg), address ])
def disable_auto_add_mapped_luns(self):
pass
self.l = mock_Luns(None)
self.l.cmds = [[ "targetcli", "hello" ]]
self.l.create()
assert mock_subproc_popen.called
@mock.patch('lrbd.Popen')
def test_create(self, mock_subproc_popen):
mock_subproc_popen.return_value.returncode = 0
class mock_Luns(Luns):
def _find(self):
pass
def disable_auto_add_mapped_luns(self):
pass
class mock_LunAssignment(object):
def assign(self, target, tpg, image, lun):
pass
def assigned(self, target, image):
pass
_la = mock_LunAssignment()
self.l = mock_Luns(_la)
self.l.cmds = [[ "targetcli", "hello" ]]
self.l.create()
assert mock_subproc_popen.called
| lgpl-2.1 |
blacklin/kbengine | kbe/src/lib/python/Lib/plat-freebsd7/IN.py | 344 | 12956 | # Generated by h2py from /usr/include/netinet/in.h
# Included from sys/cdefs.h
__GNUCLIKE_ASM = 3
__GNUCLIKE_ASM = 2
__GNUCLIKE___TYPEOF = 1
__GNUCLIKE___OFFSETOF = 1
__GNUCLIKE___SECTION = 1
__GNUCLIKE_ATTRIBUTE_MODE_DI = 1
__GNUCLIKE_CTOR_SECTION_HANDLING = 1
__GNUCLIKE_BUILTIN_CONSTANT_P = 1
__GNUCLIKE_BUILTIN_VARARGS = 1
__GNUCLIKE_BUILTIN_STDARG = 1
__GNUCLIKE_BUILTIN_VAALIST = 1
__GNUC_VA_LIST_COMPATIBILITY = 1
__GNUCLIKE_BUILTIN_NEXT_ARG = 1
__GNUCLIKE_BUILTIN_MEMCPY = 1
__CC_SUPPORTS_INLINE = 1
__CC_SUPPORTS___INLINE = 1
__CC_SUPPORTS___INLINE__ = 1
__CC_SUPPORTS___FUNC__ = 1
__CC_SUPPORTS_WARNING = 1
__CC_SUPPORTS_VARADIC_XXX = 1
__CC_SUPPORTS_DYNAMIC_ARRAY_INIT = 1
__CC_INT_IS_32BIT = 1
def __P(protos): return protos
def __STRING(x): return #x
def __XSTRING(x): return __STRING(x)
def __P(protos): return ()
def __STRING(x): return "x"
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __aligned(x): return __attribute__((__aligned__(x)))
def __section(x): return __attribute__((__section__(x)))
def __nonnull(x): return __attribute__((__nonnull__(x)))
def __predict_true(exp): return __builtin_expect((exp), 1)
def __predict_false(exp): return __builtin_expect((exp), 0)
def __predict_true(exp): return (exp)
def __predict_false(exp): return (exp)
def __format_arg(fmtarg): return __attribute__((__format_arg__ (fmtarg)))
def __FBSDID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID(s): return __IDSTRING(__CONCAT(__rcsid_,__LINE__),s)
def __RCSID_SOURCE(s): return __IDSTRING(__CONCAT(__rcsid_source_,__LINE__),s)
def __SCCSID(s): return __IDSTRING(__CONCAT(__sccsid_,__LINE__),s)
def __COPYRIGHT(s): return __IDSTRING(__CONCAT(__copyright_,__LINE__),s)
_POSIX_C_SOURCE = 199009
_POSIX_C_SOURCE = 199209
__XSI_VISIBLE = 600
_POSIX_C_SOURCE = 200112
__XSI_VISIBLE = 500
_POSIX_C_SOURCE = 199506
_POSIX_C_SOURCE = 198808
__POSIX_VISIBLE = 200112
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 199506
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199309
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199209
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 199009
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 198808
__ISO_C_VISIBLE = 0
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1990
__POSIX_VISIBLE = 0
__XSI_VISIBLE = 0
__BSD_VISIBLE = 0
__ISO_C_VISIBLE = 1999
__POSIX_VISIBLE = 200112
__XSI_VISIBLE = 600
__BSD_VISIBLE = 1
__ISO_C_VISIBLE = 1999
# Included from sys/_types.h
# Included from machine/_types.h
# Included from machine/endian.h
_QUAD_HIGHWORD = 1
_QUAD_LOWWORD = 0
_LITTLE_ENDIAN = 1234
_BIG_ENDIAN = 4321
_PDP_ENDIAN = 3412
_BYTE_ORDER = _LITTLE_ENDIAN
LITTLE_ENDIAN = _LITTLE_ENDIAN
BIG_ENDIAN = _BIG_ENDIAN
PDP_ENDIAN = _PDP_ENDIAN
BYTE_ORDER = _BYTE_ORDER
def __word_swap_int_var(x): return \
def __word_swap_int_const(x): return \
def __word_swap_int(x): return __word_swap_int_var(x)
def __byte_swap_int_var(x): return \
def __byte_swap_int_const(x): return \
def __byte_swap_int(x): return __byte_swap_int_var(x)
def __byte_swap_word_var(x): return \
def __byte_swap_word_const(x): return \
def __byte_swap_word(x): return __byte_swap_word_var(x)
def __htonl(x): return __bswap32(x)
def __htons(x): return __bswap16(x)
def __ntohl(x): return __bswap32(x)
def __ntohs(x): return __bswap16(x)
IPPROTO_IP = 0
IPPROTO_ICMP = 1
IPPROTO_TCP = 6
IPPROTO_UDP = 17
def htonl(x): return __htonl(x)
def htons(x): return __htons(x)
def ntohl(x): return __ntohl(x)
def ntohs(x): return __ntohs(x)
IPPROTO_RAW = 255
INET_ADDRSTRLEN = 16
IPPROTO_HOPOPTS = 0
IPPROTO_IGMP = 2
IPPROTO_GGP = 3
IPPROTO_IPV4 = 4
IPPROTO_IPIP = IPPROTO_IPV4
IPPROTO_ST = 7
IPPROTO_EGP = 8
IPPROTO_PIGP = 9
IPPROTO_RCCMON = 10
IPPROTO_NVPII = 11
IPPROTO_PUP = 12
IPPROTO_ARGUS = 13
IPPROTO_EMCON = 14
IPPROTO_XNET = 15
IPPROTO_CHAOS = 16
IPPROTO_MUX = 18
IPPROTO_MEAS = 19
IPPROTO_HMP = 20
IPPROTO_PRM = 21
IPPROTO_IDP = 22
IPPROTO_TRUNK1 = 23
IPPROTO_TRUNK2 = 24
IPPROTO_LEAF1 = 25
IPPROTO_LEAF2 = 26
IPPROTO_RDP = 27
IPPROTO_IRTP = 28
IPPROTO_TP = 29
IPPROTO_BLT = 30
IPPROTO_NSP = 31
IPPROTO_INP = 32
IPPROTO_SEP = 33
IPPROTO_3PC = 34
IPPROTO_IDPR = 35
IPPROTO_XTP = 36
IPPROTO_DDP = 37
IPPROTO_CMTP = 38
IPPROTO_TPXX = 39
IPPROTO_IL = 40
IPPROTO_IPV6 = 41
IPPROTO_SDRP = 42
IPPROTO_ROUTING = 43
IPPROTO_FRAGMENT = 44
IPPROTO_IDRP = 45
IPPROTO_RSVP = 46
IPPROTO_GRE = 47
IPPROTO_MHRP = 48
IPPROTO_BHA = 49
IPPROTO_ESP = 50
IPPROTO_AH = 51
IPPROTO_INLSP = 52
IPPROTO_SWIPE = 53
IPPROTO_NHRP = 54
IPPROTO_MOBILE = 55
IPPROTO_TLSP = 56
IPPROTO_SKIP = 57
IPPROTO_ICMPV6 = 58
IPPROTO_NONE = 59
IPPROTO_DSTOPTS = 60
IPPROTO_AHIP = 61
IPPROTO_CFTP = 62
IPPROTO_HELLO = 63
IPPROTO_SATEXPAK = 64
IPPROTO_KRYPTOLAN = 65
IPPROTO_RVD = 66
IPPROTO_IPPC = 67
IPPROTO_ADFS = 68
IPPROTO_SATMON = 69
IPPROTO_VISA = 70
IPPROTO_IPCV = 71
IPPROTO_CPNX = 72
IPPROTO_CPHB = 73
IPPROTO_WSN = 74
IPPROTO_PVP = 75
IPPROTO_BRSATMON = 76
IPPROTO_ND = 77
IPPROTO_WBMON = 78
IPPROTO_WBEXPAK = 79
IPPROTO_EON = 80
IPPROTO_VMTP = 81
IPPROTO_SVMTP = 82
IPPROTO_VINES = 83
IPPROTO_TTP = 84
IPPROTO_IGP = 85
IPPROTO_DGP = 86
IPPROTO_TCF = 87
IPPROTO_IGRP = 88
IPPROTO_OSPFIGP = 89
IPPROTO_SRPC = 90
IPPROTO_LARP = 91
IPPROTO_MTP = 92
IPPROTO_AX25 = 93
IPPROTO_IPEIP = 94
IPPROTO_MICP = 95
IPPROTO_SCCSP = 96
IPPROTO_ETHERIP = 97
IPPROTO_ENCAP = 98
IPPROTO_APES = 99
IPPROTO_GMTP = 100
IPPROTO_IPCOMP = 108
IPPROTO_SCTP = 132
IPPROTO_PIM = 103
IPPROTO_CARP = 112
IPPROTO_PGM = 113
IPPROTO_PFSYNC = 240
IPPROTO_OLD_DIVERT = 254
IPPROTO_MAX = 256
IPPROTO_DONE = 257
IPPROTO_DIVERT = 258
IPPROTO_SPACER = 32767
IPPORT_RESERVED = 1024
IPPORT_HIFIRSTAUTO = 49152
IPPORT_HILASTAUTO = 65535
IPPORT_RESERVEDSTART = 600
IPPORT_MAX = 65535
def IN_CLASSA(i): return (((u_int32_t)(i) & (-2147483648)) == 0)
IN_CLASSA_NET = (-16777216)
IN_CLASSA_NSHIFT = 24
IN_CLASSA_HOST = 0x00ffffff
IN_CLASSA_MAX = 128
def IN_CLASSB(i): return (((u_int32_t)(i) & (-1073741824)) == (-2147483648))
IN_CLASSB_NET = (-65536)
IN_CLASSB_NSHIFT = 16
IN_CLASSB_HOST = 0x0000ffff
IN_CLASSB_MAX = 65536
def IN_CLASSC(i): return (((u_int32_t)(i) & (-536870912)) == (-1073741824))
IN_CLASSC_NET = (-256)
IN_CLASSC_NSHIFT = 8
IN_CLASSC_HOST = 0x000000ff
def IN_CLASSD(i): return (((u_int32_t)(i) & (-268435456)) == (-536870912))
IN_CLASSD_NET = (-268435456)
IN_CLASSD_NSHIFT = 28
IN_CLASSD_HOST = 0x0fffffff
def IN_MULTICAST(i): return IN_CLASSD(i)
def IN_EXPERIMENTAL(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
def IN_BADCLASS(i): return (((u_int32_t)(i) & (-268435456)) == (-268435456))
def IN_LINKLOCAL(i): return (((u_int32_t)(i) & (-65536)) == (-1442971648))
def IN_LOCAL_GROUP(i): return (((u_int32_t)(i) & (-256)) == (-536870912))
INADDR_NONE = (-1)
IN_LOOPBACKNET = 127
IP_OPTIONS = 1
IP_HDRINCL = 2
IP_TOS = 3
IP_TTL = 4
IP_RECVOPTS = 5
IP_RECVRETOPTS = 6
IP_RECVDSTADDR = 7
IP_SENDSRCADDR = IP_RECVDSTADDR
IP_RETOPTS = 8
IP_MULTICAST_IF = 9
IP_MULTICAST_TTL = 10
IP_MULTICAST_LOOP = 11
IP_ADD_MEMBERSHIP = 12
IP_DROP_MEMBERSHIP = 13
IP_MULTICAST_VIF = 14
IP_RSVP_ON = 15
IP_RSVP_OFF = 16
IP_RSVP_VIF_ON = 17
IP_RSVP_VIF_OFF = 18
IP_PORTRANGE = 19
IP_RECVIF = 20
IP_IPSEC_POLICY = 21
IP_FAITH = 22
IP_ONESBCAST = 23
IP_FW_TABLE_ADD = 40
IP_FW_TABLE_DEL = 41
IP_FW_TABLE_FLUSH = 42
IP_FW_TABLE_GETSIZE = 43
IP_FW_TABLE_LIST = 44
IP_FW_ADD = 50
IP_FW_DEL = 51
IP_FW_FLUSH = 52
IP_FW_ZERO = 53
IP_FW_GET = 54
IP_FW_RESETLOG = 55
IP_FW_NAT_CFG = 56
IP_FW_NAT_DEL = 57
IP_FW_NAT_GET_CONFIG = 58
IP_FW_NAT_GET_LOG = 59
IP_DUMMYNET_CONFIGURE = 60
IP_DUMMYNET_DEL = 61
IP_DUMMYNET_FLUSH = 62
IP_DUMMYNET_GET = 64
IP_RECVTTL = 65
IP_MINTTL = 66
IP_DONTFRAG = 67
IP_ADD_SOURCE_MEMBERSHIP = 70
IP_DROP_SOURCE_MEMBERSHIP = 71
IP_BLOCK_SOURCE = 72
IP_UNBLOCK_SOURCE = 73
IP_MSFILTER = 74
MCAST_JOIN_GROUP = 80
MCAST_LEAVE_GROUP = 81
MCAST_JOIN_SOURCE_GROUP = 82
MCAST_LEAVE_SOURCE_GROUP = 83
MCAST_BLOCK_SOURCE = 84
MCAST_UNBLOCK_SOURCE = 85
IP_DEFAULT_MULTICAST_TTL = 1
IP_DEFAULT_MULTICAST_LOOP = 1
IP_MIN_MEMBERSHIPS = 31
IP_MAX_MEMBERSHIPS = 4095
IP_MAX_SOURCE_FILTER = 1024
MCAST_INCLUDE = 1
MCAST_EXCLUDE = 2
IP_PORTRANGE_DEFAULT = 0
IP_PORTRANGE_HIGH = 1
IP_PORTRANGE_LOW = 2
IPPROTO_MAXID = (IPPROTO_AH + 1)
IPCTL_FORWARDING = 1
IPCTL_SENDREDIRECTS = 2
IPCTL_DEFTTL = 3
IPCTL_DEFMTU = 4
IPCTL_RTEXPIRE = 5
IPCTL_RTMINEXPIRE = 6
IPCTL_RTMAXCACHE = 7
IPCTL_SOURCEROUTE = 8
IPCTL_DIRECTEDBROADCAST = 9
IPCTL_INTRQMAXLEN = 10
IPCTL_INTRQDROPS = 11
IPCTL_STATS = 12
IPCTL_ACCEPTSOURCEROUTE = 13
IPCTL_FASTFORWARDING = 14
IPCTL_KEEPFAITH = 15
IPCTL_GIF_TTL = 16
IPCTL_MAXID = 17
def in_nullhost(x): return ((x).s_addr == INADDR_ANY)
# Included from netinet6/in6.h
__KAME_VERSION = "FreeBSD"
IPV6PORT_RESERVED = 1024
IPV6PORT_ANONMIN = 49152
IPV6PORT_ANONMAX = 65535
IPV6PORT_RESERVEDMIN = 600
IPV6PORT_RESERVEDMAX = (IPV6PORT_RESERVED-1)
INET6_ADDRSTRLEN = 46
IPV6_ADDR_INT32_ONE = 1
IPV6_ADDR_INT32_TWO = 2
IPV6_ADDR_INT32_MNL = (-16711680)
IPV6_ADDR_INT32_MLL = (-16646144)
IPV6_ADDR_INT32_SMP = 0x0000ffff
IPV6_ADDR_INT16_ULL = 0xfe80
IPV6_ADDR_INT16_USL = 0xfec0
IPV6_ADDR_INT16_MLL = 0xff02
IPV6_ADDR_INT32_ONE = 0x01000000
IPV6_ADDR_INT32_TWO = 0x02000000
IPV6_ADDR_INT32_MNL = 0x000001ff
IPV6_ADDR_INT32_MLL = 0x000002ff
IPV6_ADDR_INT32_SMP = (-65536)
IPV6_ADDR_INT16_ULL = 0x80fe
IPV6_ADDR_INT16_USL = 0xc0fe
IPV6_ADDR_INT16_MLL = 0x02ff
def IN6_IS_ADDR_UNSPECIFIED(a): return \
def IN6_IS_ADDR_LOOPBACK(a): return \
def IN6_IS_ADDR_V4COMPAT(a): return \
def IN6_IS_ADDR_V4MAPPED(a): return \
IPV6_ADDR_SCOPE_NODELOCAL = 0x01
IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
IPV6_ADDR_SCOPE_SITELOCAL = 0x05
IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
IPV6_ADDR_SCOPE_GLOBAL = 0x0e
__IPV6_ADDR_SCOPE_NODELOCAL = 0x01
__IPV6_ADDR_SCOPE_INTFACELOCAL = 0x01
__IPV6_ADDR_SCOPE_LINKLOCAL = 0x02
__IPV6_ADDR_SCOPE_SITELOCAL = 0x05
__IPV6_ADDR_SCOPE_ORGLOCAL = 0x08
__IPV6_ADDR_SCOPE_GLOBAL = 0x0e
def IN6_IS_ADDR_LINKLOCAL(a): return \
def IN6_IS_ADDR_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_INTFACELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_ADDR_MC_NODELOCAL(a): return \
def IN6_IS_ADDR_MC_LINKLOCAL(a): return \
def IN6_IS_ADDR_MC_SITELOCAL(a): return \
def IN6_IS_ADDR_MC_ORGLOCAL(a): return \
def IN6_IS_ADDR_MC_GLOBAL(a): return \
def IN6_IS_SCOPE_LINKLOCAL(a): return \
def IN6_IS_SCOPE_EMBED(a): return \
def IFA6_IS_DEPRECATED(a): return \
def IFA6_IS_INVALID(a): return \
IPV6_OPTIONS = 1
IPV6_RECVOPTS = 5
IPV6_RECVRETOPTS = 6
IPV6_RECVDSTADDR = 7
IPV6_RETOPTS = 8
IPV6_SOCKOPT_RESERVED1 = 3
IPV6_UNICAST_HOPS = 4
IPV6_MULTICAST_IF = 9
IPV6_MULTICAST_HOPS = 10
IPV6_MULTICAST_LOOP = 11
IPV6_JOIN_GROUP = 12
IPV6_LEAVE_GROUP = 13
IPV6_PORTRANGE = 14
ICMP6_FILTER = 18
IPV6_2292PKTINFO = 19
IPV6_2292HOPLIMIT = 20
IPV6_2292NEXTHOP = 21
IPV6_2292HOPOPTS = 22
IPV6_2292DSTOPTS = 23
IPV6_2292RTHDR = 24
IPV6_2292PKTOPTIONS = 25
IPV6_CHECKSUM = 26
IPV6_V6ONLY = 27
IPV6_BINDV6ONLY = IPV6_V6ONLY
IPV6_IPSEC_POLICY = 28
IPV6_FAITH = 29
IPV6_FW_ADD = 30
IPV6_FW_DEL = 31
IPV6_FW_FLUSH = 32
IPV6_FW_ZERO = 33
IPV6_FW_GET = 34
IPV6_RTHDRDSTOPTS = 35
IPV6_RECVPKTINFO = 36
IPV6_RECVHOPLIMIT = 37
IPV6_RECVRTHDR = 38
IPV6_RECVHOPOPTS = 39
IPV6_RECVDSTOPTS = 40
IPV6_RECVRTHDRDSTOPTS = 41
IPV6_USE_MIN_MTU = 42
IPV6_RECVPATHMTU = 43
IPV6_PATHMTU = 44
IPV6_REACHCONF = 45
IPV6_PKTINFO = 46
IPV6_HOPLIMIT = 47
IPV6_NEXTHOP = 48
IPV6_HOPOPTS = 49
IPV6_DSTOPTS = 50
IPV6_RTHDR = 51
IPV6_PKTOPTIONS = 52
IPV6_RECVTCLASS = 57
IPV6_AUTOFLOWLABEL = 59
IPV6_TCLASS = 61
IPV6_DONTFRAG = 62
IPV6_PREFER_TEMPADDR = 63
IPV6_MSFILTER = 74
IPV6_RTHDR_LOOSE = 0
IPV6_RTHDR_STRICT = 1
IPV6_RTHDR_TYPE_0 = 0
IPV6_DEFAULT_MULTICAST_HOPS = 1
IPV6_DEFAULT_MULTICAST_LOOP = 1
IPV6_PORTRANGE_DEFAULT = 0
IPV6_PORTRANGE_HIGH = 1
IPV6_PORTRANGE_LOW = 2
IPV6PROTO_MAXID = (IPPROTO_PIM + 1)
IPV6CTL_FORWARDING = 1
IPV6CTL_SENDREDIRECTS = 2
IPV6CTL_DEFHLIM = 3
IPV6CTL_DEFMTU = 4
IPV6CTL_FORWSRCRT = 5
IPV6CTL_STATS = 6
IPV6CTL_MRTSTATS = 7
IPV6CTL_MRTPROTO = 8
IPV6CTL_MAXFRAGPACKETS = 9
IPV6CTL_SOURCECHECK = 10
IPV6CTL_SOURCECHECK_LOGINT = 11
IPV6CTL_ACCEPT_RTADV = 12
IPV6CTL_KEEPFAITH = 13
IPV6CTL_LOG_INTERVAL = 14
IPV6CTL_HDRNESTLIMIT = 15
IPV6CTL_DAD_COUNT = 16
IPV6CTL_AUTO_FLOWLABEL = 17
IPV6CTL_DEFMCASTHLIM = 18
IPV6CTL_GIF_HLIM = 19
IPV6CTL_KAME_VERSION = 20
IPV6CTL_USE_DEPRECATED = 21
IPV6CTL_RR_PRUNE = 22
IPV6CTL_MAPPED_ADDR = 23
IPV6CTL_V6ONLY = 24
IPV6CTL_RTEXPIRE = 25
IPV6CTL_RTMINEXPIRE = 26
IPV6CTL_RTMAXCACHE = 27
IPV6CTL_USETEMPADDR = 32
IPV6CTL_TEMPPLTIME = 33
IPV6CTL_TEMPVLTIME = 34
IPV6CTL_AUTO_LINKLOCAL = 35
IPV6CTL_RIP6STATS = 36
IPV6CTL_PREFER_TEMPADDR = 37
IPV6CTL_ADDRCTLPOLICY = 38
IPV6CTL_USE_DEFAULTZONE = 39
IPV6CTL_MAXFRAGS = 41
IPV6CTL_IFQ = 42
IPV6CTL_ISATAPRTR = 43
IPV6CTL_MCAST_PMTU = 44
IPV6CTL_STEALTH = 45
IPV6CTL_MAXID = 46
| lgpl-3.0 |
forairan/gagar | gagar/drawutils.py | 1 | 2510 | TWOPI = 6.28318530717958
BLACK = (0,0,0)
WHITE = (1,1,1)
GRAY = (.5,)*3
DARK_GRAY = (.2,)*3
LIGHT_GRAY = (.7,)*3
RED = (1,0,0)
GREEN = (0,1,0)
BLUE = (0,0,1)
YELLOW = (1,1,0)
TURQUOISE = (0,1,1)
FUCHSIA = (1,0,1)
ORANGE = (1,.5,0)
PURPLE = (.5,0,1)
LIGHT_GREEN = (.5,1,.5)
LIGHT_BLUE = (.5,.5,1)
def frange(start, end, step):
"""same as range(), but allows using floats"""
while start < end:
yield start
start += step
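# e.g. list(frange(0.0, 1.0, 0.25)) -> [0.0, 0.25, 0.5, 0.75] (illustrative;
# float accumulation error can matter for long ranges).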
def to_rgba(c, a):
return c[0], c[1], c[2], a
def as_rect(tl, br=None, size=None):
"""Make tuple from 2 Vecs. Either bottom-right or rect size must be given."""
if size:
return tl.x, tl.y, size.x, size.y
else:
return tl.x, tl.y, br.x-tl.x, br.y-tl.y
def draw_text(c, pos, text, align='left', color=WHITE, shadow=None, outline=None, size=12, face='sans'):
try:
c.select_font_face(face)
c.set_font_size(size)
align = align.lower()
if align == 'center':
x_bearing, y_bearing, text_width, text_height, x_advance, y_advance \
= c.text_extents(text)
x = int(pos[0] - x_bearing - text_width / 2)
y = int(pos[1] - y_bearing - text_height / 2)
elif align == 'left':
x, y = map(int, pos)
elif align == 'right':
x_bearing, y_bearing, text_width, text_height, x_advance, y_advance \
= c.text_extents(text)
x = int(pos[0] - x_bearing - text_width)
y = int(pos[1])
else:
raise ValueError('Invalid alignment "%s"' % align)
if shadow:
s_color, s_offset = shadow
s_dx, s_dy = s_offset
c.move_to(x + s_dx, y + s_dy)
c.set_source_rgba(*s_color)
c.show_text(text)
if outline:
o_color, o_size = outline
c.move_to(x, y)
c.set_line_width(o_size)
c.set_source_rgba(*o_color)
c.text_path(text)
c.stroke()
c.move_to(x, y)
c.set_source_rgba(*color)
c.text_path(text)
c.fill()
except UnicodeEncodeError:
pass
def draw_circle(c, pos, radius, color=None):
x, y = pos
if color:
c.set_source_rgba(*color)
c.new_sub_path()
c.arc(x, y, radius, 0, TWOPI)
c.fill()
def draw_circle_outline(c, pos, radius, color=None):
x, y = pos
if color:
c.set_source_rgba(*color)
c.new_sub_path()
c.arc(x, y, radius, 0, TWOPI)
c.stroke()
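# Illustrative usage with a cairo context (a sketch; `ctx` is assumed to be a
# cairo.Context supplied by the caller, not created in this module):
#   draw_circle(ctx, (100, 100), 30, color=to_rgba(RED, .5))
#   draw_text(ctx, (100, 100), "cell", align='center', color=WHITE)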
| gpl-3.0 |
zsiciarz/django | django/test/client.py | 11 | 26926 | import json
import mimetypes
import os
import re
import sys
from copy import copy
from importlib import import_module
from io import BytesIO
from urllib.parse import unquote_to_bytes, urljoin, urlparse, urlsplit
from django.conf import settings
from django.core.handlers.base import BaseHandler
from django.core.handlers.wsgi import WSGIRequest
from django.core.signals import (
got_request_exception, request_finished, request_started,
)
from django.db import close_old_connections
from django.http import HttpRequest, QueryDict, SimpleCookie
from django.template import TemplateDoesNotExist
from django.test import signals
from django.test.utils import ContextList
from django.urls import resolve
from django.utils.encoding import force_bytes
from django.utils.functional import SimpleLazyObject, curry
from django.utils.http import urlencode
from django.utils.itercompat import is_iterable
__all__ = ('Client', 'RedirectCycleError', 'RequestFactory', 'encode_file', 'encode_multipart')
BOUNDARY = 'BoUnDaRyStRiNg'
MULTIPART_CONTENT = 'multipart/form-data; boundary=%s' % BOUNDARY
CONTENT_TYPE_RE = re.compile(r'.*; charset=([\w\d-]+);?')
# JSON Vendor Tree spec: https://tools.ietf.org/html/rfc6838#section-3.2
JSON_CONTENT_TYPE_RE = re.compile(r'^application\/(vnd\..+\+)?json')
class RedirectCycleError(Exception):
"""The test client has been asked to follow a redirect loop."""
def __init__(self, message, last_response):
super().__init__(message)
self.last_response = last_response
self.redirect_chain = last_response.redirect_chain
class FakePayload:
"""
A wrapper around BytesIO that restricts what can be read since data from
the network can't be seeked and cannot be read outside of its content
length. This makes sure that views can't do anything under the test client
that wouldn't work in real life.
"""
def __init__(self, content=None):
self.__content = BytesIO()
self.__len = 0
self.read_started = False
if content is not None:
self.write(content)
def __len__(self):
return self.__len
def read(self, num_bytes=None):
if not self.read_started:
self.__content.seek(0)
self.read_started = True
if num_bytes is None:
num_bytes = self.__len or 0
assert self.__len >= num_bytes, "Cannot read more than the available bytes from the HTTP incoming data."
content = self.__content.read(num_bytes)
self.__len -= num_bytes
return content
def write(self, content):
if self.read_started:
raise ValueError("Unable to write a payload after he's been read")
content = force_bytes(content)
self.__content.write(content)
self.__len += len(content)
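# Minimal sketch of FakePayload's contract (illustrative, not a test):
#   payload = FakePayload(b'abc')
#   payload.read(2)   # -> b'ab'
#   payload.read(5)   # AssertionError: only one byte of content is left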
def closing_iterator_wrapper(iterable, close):
try:
yield from iterable
finally:
request_finished.disconnect(close_old_connections)
close() # will fire request_finished
request_finished.connect(close_old_connections)
def conditional_content_removal(request, response):
"""
Simulate the behavior of most Web servers by removing the content of
responses for HEAD requests, 1xx, 204, and 304 responses. Ensure
compliance with RFC 7230, section 3.3.3.
"""
if 100 <= response.status_code < 200 or response.status_code in (204, 304):
if response.streaming:
response.streaming_content = []
else:
response.content = b''
response['Content-Length'] = '0'
if request.method == 'HEAD':
if response.streaming:
response.streaming_content = []
else:
response.content = b''
return response
class ClientHandler(BaseHandler):
"""
A HTTP Handler that can be used for testing purposes. Use the WSGI
interface to compose requests, but return the raw HttpResponse object with
the originating WSGIRequest attached to its ``wsgi_request`` attribute.
"""
def __init__(self, enforce_csrf_checks=True, *args, **kwargs):
self.enforce_csrf_checks = enforce_csrf_checks
super().__init__(*args, **kwargs)
def __call__(self, environ):
# Set up middleware if needed. We couldn't do this earlier, because
# settings weren't available.
if self._middleware_chain is None:
self.load_middleware()
request_started.disconnect(close_old_connections)
request_started.send(sender=self.__class__, environ=environ)
request_started.connect(close_old_connections)
request = WSGIRequest(environ)
# sneaky little hack so that we can easily get round
# CsrfViewMiddleware. This makes life easier, and is probably
# required for backwards compatibility with external tests against
# admin views.
request._dont_enforce_csrf_checks = not self.enforce_csrf_checks
# Request goes through middleware.
response = self.get_response(request)
# Simulate behaviors of most Web servers.
conditional_content_removal(request, response)
# Attach the originating request to the response so that it could be
# later retrieved.
response.wsgi_request = request
# Emulate a WSGI server by calling the close method on completion.
if response.streaming:
response.streaming_content = closing_iterator_wrapper(
response.streaming_content, response.close)
else:
request_finished.disconnect(close_old_connections)
response.close() # will fire request_finished
request_finished.connect(close_old_connections)
return response
def store_rendered_templates(store, signal, sender, template, context, **kwargs):
"""
Store templates and contexts that are rendered.
The context is copied so that it is an accurate representation at the time
of rendering.
"""
store.setdefault('templates', []).append(template)
if 'context' not in store:
store['context'] = ContextList()
store['context'].append(copy(context))
def encode_multipart(boundary, data):
"""
Encode multipart POST data from a dictionary of form values.
The key will be used as the form data name; the value will be transmitted
as content. If the value is a file, the contents of the file will be sent
as an application/octet-stream; otherwise, str(value) will be sent.
"""
lines = []
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# Not by any means perfect, but good enough for our purposes.
def is_file(thing):
return hasattr(thing, "read") and callable(thing.read)
# Each bit of the multipart form data could be either a form value or a
# file, or a *list* of form values and/or files. Remember that HTTP field
# names can be duplicated!
for (key, value) in data.items():
if is_file(value):
lines.extend(encode_file(boundary, key, value))
elif not isinstance(value, str) and is_iterable(value):
for item in value:
if is_file(item):
lines.extend(encode_file(boundary, key, item))
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
item
])
else:
lines.extend(to_bytes(val) for val in [
'--%s' % boundary,
'Content-Disposition: form-data; name="%s"' % key,
'',
value
])
lines.extend([
to_bytes('--%s--' % boundary),
b'',
])
return b'\r\n'.join(lines)
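# Illustrative call (a sketch): the test client builds multipart POST bodies
# this way when content_type is MULTIPART_CONTENT; values may be plain values,
# open file objects, or lists of either:
#   body = encode_multipart(BOUNDARY, {'name': 'value', 'tags': ['a', 'b']})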
def encode_file(boundary, key, file):
def to_bytes(s):
return force_bytes(s, settings.DEFAULT_CHARSET)
# file.name might not be a string. For example, it's an int for
# tempfile.TemporaryFile().
file_has_string_name = hasattr(file, 'name') and isinstance(file.name, str)
filename = os.path.basename(file.name) if file_has_string_name else ''
if hasattr(file, 'content_type'):
content_type = file.content_type
elif filename:
content_type = mimetypes.guess_type(filename)[0]
else:
content_type = None
if content_type is None:
content_type = 'application/octet-stream'
if not filename:
filename = key
return [
to_bytes('--%s' % boundary),
to_bytes('Content-Disposition: form-data; name="%s"; filename="%s"'
% (key, filename)),
to_bytes('Content-Type: %s' % content_type),
b'',
to_bytes(file.read())
]
class RequestFactory:
"""
Class that lets you create mock Request objects for use in testing.
Usage:
rf = RequestFactory()
get_request = rf.get('/hello/')
post_request = rf.post('/submit/', {'foo': 'bar'})
Once you have a request object you can pass it to any view function,
just as if that view had been hooked up using a URLconf.
"""
def __init__(self, **defaults):
self.defaults = defaults
self.cookies = SimpleCookie()
self.errors = BytesIO()
def _base_environ(self, **request):
"""
The base environment for a request.
"""
# This is a minimal valid WSGI environ dictionary, plus:
# - HTTP_COOKIE: for cookie support,
# - REMOTE_ADDR: often useful, see #8551.
# See http://www.python.org/dev/peps/pep-3333/#environ-variables
environ = {
'HTTP_COOKIE': self.cookies.output(header='', sep='; '),
'PATH_INFO': '/',
'REMOTE_ADDR': '127.0.0.1',
'REQUEST_METHOD': 'GET',
'SCRIPT_NAME': '',
'SERVER_NAME': 'testserver',
'SERVER_PORT': '80',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.input': FakePayload(b''),
'wsgi.errors': self.errors,
'wsgi.multiprocess': True,
'wsgi.multithread': False,
'wsgi.run_once': False,
}
environ.update(self.defaults)
environ.update(request)
return environ
def request(self, **request):
"Construct a generic request object."
return WSGIRequest(self._base_environ(**request))
def _encode_data(self, data, content_type):
if content_type is MULTIPART_CONTENT:
return encode_multipart(BOUNDARY, data)
else:
# Encode the content so that the byte representation is correct.
match = CONTENT_TYPE_RE.match(content_type)
if match:
charset = match.group(1)
else:
charset = settings.DEFAULT_CHARSET
return force_bytes(data, encoding=charset)
def _get_path(self, parsed):
path = parsed.path
# If there are parameters, add them
if parsed.params:
path += ";" + parsed.params
path = unquote_to_bytes(path)
# Replace the behavior where non-ASCII values in the WSGI environ are
# arbitrarily decoded with ISO-8859-1.
# Refs comment in `get_bytes_from_wsgi()`.
return path.decode('iso-8859-1')
def get(self, path, data=None, secure=False, **extra):
"""Construct a GET request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('GET', path, secure=secure, **r)
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
secure=False, **extra):
"""Construct a POST request."""
data = {} if data is None else data
post_data = self._encode_data(data, content_type)
return self.generic('POST', path, post_data, content_type,
secure=secure, **extra)
def head(self, path, data=None, secure=False, **extra):
"""Construct a HEAD request."""
data = {} if data is None else data
r = {
'QUERY_STRING': urlencode(data, doseq=True),
}
r.update(extra)
return self.generic('HEAD', path, secure=secure, **r)
def trace(self, path, secure=False, **extra):
"""Construct a TRACE request."""
return self.generic('TRACE', path, secure=secure, **extra)
def options(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"Construct an OPTIONS request."
return self.generic('OPTIONS', path, data, content_type,
secure=secure, **extra)
def put(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PUT request."""
return self.generic('PUT', path, data, content_type,
secure=secure, **extra)
def patch(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a PATCH request."""
return self.generic('PATCH', path, data, content_type,
secure=secure, **extra)
def delete(self, path, data='', content_type='application/octet-stream',
secure=False, **extra):
"""Construct a DELETE request."""
return self.generic('DELETE', path, data, content_type,
secure=secure, **extra)
def generic(self, method, path, data='',
content_type='application/octet-stream', secure=False,
**extra):
"""Construct an arbitrary HTTP request."""
parsed = urlparse(str(path)) # path can be lazy
data = force_bytes(data, settings.DEFAULT_CHARSET)
r = {
'PATH_INFO': self._get_path(parsed),
'REQUEST_METHOD': method,
'SERVER_PORT': '443' if secure else '80',
'wsgi.url_scheme': 'https' if secure else 'http',
}
if data:
r.update({
'CONTENT_LENGTH': len(data),
'CONTENT_TYPE': content_type,
'wsgi.input': FakePayload(data),
})
r.update(extra)
# If QUERY_STRING is absent or empty, we want to extract it from the URL.
if not r.get('QUERY_STRING'):
# WSGI requires latin-1 encoded strings. See get_path_info().
query_string = force_bytes(parsed[4]).decode('iso-8859-1')
r['QUERY_STRING'] = query_string
return self.request(**r)
class Client(RequestFactory):
"""
A class that can act as a client for testing purposes.
It allows the user to compose GET and POST requests, and
obtain the response that the server gave to those requests.
The server Response objects are annotated with the details
of the contexts and templates that were rendered during the
process of serving the request.
Client objects are stateful - they will retain cookie (and
thus session) details for the lifetime of the Client instance.
This is not intended as a replacement for Twill/Selenium or
the like - it is here to allow testing against the
contexts and templates produced by a view, rather than the
HTML rendered to the end-user.
"""
def __init__(self, enforce_csrf_checks=False, **defaults):
super().__init__(**defaults)
self.handler = ClientHandler(enforce_csrf_checks)
self.exc_info = None
def store_exc_info(self, **kwargs):
"""Store exceptions when they are generated by a view."""
self.exc_info = sys.exc_info()
@property
def session(self):
"""Return the current session variables."""
engine = import_module(settings.SESSION_ENGINE)
cookie = self.cookies.get(settings.SESSION_COOKIE_NAME)
if cookie:
return engine.SessionStore(cookie.value)
session = engine.SessionStore()
session.save()
self.cookies[settings.SESSION_COOKIE_NAME] = session.session_key
return session
def request(self, **request):
"""
The master request method. Compose the environment dictionary and pass
to the handler, return the result of the handler. Assume defaults for
the query environment, which can be overridden using the arguments to
the request.
"""
environ = self._base_environ(**request)
# Curry a data dictionary into an instance of the template renderer
# callback function.
data = {}
on_template_render = curry(store_rendered_templates, data)
signal_uid = "template-render-%s" % id(request)
signals.template_rendered.connect(on_template_render, dispatch_uid=signal_uid)
# Capture exceptions created by the handler.
exception_uid = "request-exception-%s" % id(request)
got_request_exception.connect(self.store_exc_info, dispatch_uid=exception_uid)
try:
try:
response = self.handler(environ)
except TemplateDoesNotExist as e:
# If the view raises an exception, Django will attempt to show
# the 500.html template. If that template is not available,
# we should ignore the error in favor of re-raising the
# underlying exception that caused the 500 error. Any other
# template found to be missing during view error handling
# should be reported as-is.
if e.args != ('500.html',):
raise
# Look for a signalled exception, clear the current context
# exception data, then re-raise the signalled exception.
# Also make sure that the signalled exception is cleared from
# the local cache!
if self.exc_info:
exc_info = self.exc_info
self.exc_info = None
raise exc_info[0](exc_info[1]).with_traceback(exc_info[2])
# Save the client and request that stimulated the response.
response.client = self
response.request = request
# Add any rendered template detail to the response.
response.templates = data.get("templates", [])
response.context = data.get("context")
response.json = curry(self._parse_json, response)
# Attach the ResolverMatch instance to the response
response.resolver_match = SimpleLazyObject(lambda: resolve(request['PATH_INFO']))
# Flatten a single context. Not really necessary anymore thanks to
# the __getattr__ flattening in ContextList, but has some edge-case
# backwards-compatibility implications.
if response.context and len(response.context) == 1:
response.context = response.context[0]
# Update persistent cookie data.
if response.cookies:
self.cookies.update(response.cookies)
return response
finally:
signals.template_rendered.disconnect(dispatch_uid=signal_uid)
got_request_exception.disconnect(dispatch_uid=exception_uid)
def get(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using GET."""
response = super().get(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def post(self, path, data=None, content_type=MULTIPART_CONTENT,
follow=False, secure=False, **extra):
"""Request a response from the server using POST."""
response = super().post(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def head(self, path, data=None, follow=False, secure=False, **extra):
"""Request a response from the server using HEAD."""
response = super().head(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def options(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Request a response from the server using OPTIONS."""
response = super().options(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def put(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PUT."""
response = super().put(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def patch(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a resource to the server using PATCH."""
response = super().patch(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def delete(self, path, data='', content_type='application/octet-stream',
follow=False, secure=False, **extra):
"""Send a DELETE request to the server."""
response = super().delete(path, data=data, content_type=content_type, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def trace(self, path, data='', follow=False, secure=False, **extra):
"""Send a TRACE request to the server."""
response = super().trace(path, data=data, secure=secure, **extra)
if follow:
response = self._handle_redirects(response, **extra)
return response
def login(self, **credentials):
"""
Set the Factory to appear as if it has successfully logged into a site.
Return True if login is possible; False if the provided credentials
are incorrect.
"""
from django.contrib.auth import authenticate
user = authenticate(**credentials)
if user:
self._login(user)
return True
else:
return False
def force_login(self, user, backend=None):
def get_backend():
from django.contrib.auth import load_backend
for backend_path in settings.AUTHENTICATION_BACKENDS:
backend = load_backend(backend_path)
if hasattr(backend, 'get_user'):
return backend_path
if backend is None:
backend = get_backend()
user.backend = backend
self._login(user, backend)
def _login(self, user, backend=None):
from django.contrib.auth import login
engine = import_module(settings.SESSION_ENGINE)
# Create a fake request to store login details.
request = HttpRequest()
if self.session:
request.session = self.session
else:
request.session = engine.SessionStore()
login(request, user, backend)
# Save the session values.
request.session.save()
# Set the cookie to represent the session.
session_cookie = settings.SESSION_COOKIE_NAME
self.cookies[session_cookie] = request.session.session_key
cookie_data = {
'max-age': None,
'path': '/',
'domain': settings.SESSION_COOKIE_DOMAIN,
'secure': settings.SESSION_COOKIE_SECURE or None,
'expires': None,
}
self.cookies[session_cookie].update(cookie_data)
def logout(self):
"""Log out the user by removing the cookies and session object."""
from django.contrib.auth import get_user, logout
request = HttpRequest()
engine = import_module(settings.SESSION_ENGINE)
if self.session:
request.session = self.session
request.user = get_user(request)
else:
request.session = engine.SessionStore()
logout(request)
self.cookies = SimpleCookie()
def _parse_json(self, response, **extra):
if not hasattr(response, '_json'):
if not JSON_CONTENT_TYPE_RE.match(response.get('Content-Type')):
raise ValueError(
'Content-Type header is "{0}", not "application/json"'
.format(response.get('Content-Type'))
)
response._json = json.loads(response.content.decode(), **extra)
return response._json
def _handle_redirects(self, response, **extra):
"""
Follow any redirects by requesting responses from the server using GET.
"""
response.redirect_chain = []
while response.status_code in (301, 302, 303, 307):
response_url = response.url
redirect_chain = response.redirect_chain
redirect_chain.append((response_url, response.status_code))
url = urlsplit(response_url)
if url.scheme:
extra['wsgi.url_scheme'] = url.scheme
if url.hostname:
extra['SERVER_NAME'] = url.hostname
if url.port:
extra['SERVER_PORT'] = str(url.port)
# Prepend the request path to handle relative path redirects
path = url.path
if not path.startswith('/'):
path = urljoin(response.request['PATH_INFO'], path)
response = self.get(path, QueryDict(url.query), follow=False, **extra)
response.redirect_chain = redirect_chain
if redirect_chain[-1] in redirect_chain[:-1]:
# Check that we're not redirecting to somewhere we've already
# been to, to prevent loops.
raise RedirectCycleError("Redirect loop detected.", last_response=response)
if len(redirect_chain) > 20:
# Such a lengthy chain likely also means a loop, but one with
# a growing path, changing view, or changing query argument;
# 20 is the value of "network.http.redirection-limit" from Firefox.
raise RedirectCycleError("Too many redirects.", last_response=response)
return response
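# Hedged usage sketch (URL and expected status are illustrative assumptions,
# not taken from this module): with ``follow=True`` the client walks the
# redirect chain built by _handle_redirects() above, e.g.
#   response = self.client.get('/old-url/', follow=True)
#   response.redirect_chain        # -> [('/new-url/', 302), ...]
#   response.status_code           # final (non-redirect) status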
| bsd-3-clause |
afedchin/xbmctorrent | resources/site-packages/xbmcswift2/common.py | 34 | 3614 | '''
xbmcswift2.common
-----------------
This module contains some common helpful functions.
:copyright: (c) 2012 by Jonathan Beluch
:license: GPLv3, see LICENSE for more details.
'''
import urllib
import urllib2
try:
import cPickle as pickle
except ImportError:
import pickle
def xbmc_url(url, **options):
'''Appends key/val pairs to the end of a URL. Useful for passing arbitrary
HTTP headers to XBMC to be used when fetching a media resource, e.g.
cookies.
'''
optionstring = urllib.urlencode(options)
if optionstring:
return url + '|' + optionstring
return url
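# Hedged doctest-style example (values are illustrative only):
# >>> xbmc_url('http://example.com/video.mp4', Referer='http://foo/')
# 'http://example.com/video.mp4|Referer=http%3A%2F%2Ffoo%2F'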
def enum(*args, **kwargs):
    '''An enum class to mirror XBMC constants. All args and kwargs.keys are
    added as attrs on the returned object.
>>> States = enum('NEW_JERSEY', NY='NEW_YORK')
>>> States.NY
'NEW_YORK'
>>> States.NEW_JERSEY
'NEW_JERSEY'
>>> States._fields
['NY', 'NEW_JERSEY']
'''
kwargs.update((arg, arg) for arg in args)
kwargs['_fields'] = kwargs.keys()
return type('Enum', (), kwargs)
Modes = enum('XBMC', 'ONCE', 'CRAWL', 'INTERACTIVE')
DEBUG_MODES = [Modes.ONCE, Modes.CRAWL, Modes.INTERACTIVE]
def clean_dict(dct):
'''Returns a dict where items with a None value are removed'''
return dict((key, val) for key, val in dct.items() if val is not None)
def pickle_dict(items):
'''Returns a new dictionary where values which aren't instances of
basestring are pickled. Also, a new key '_pickled' contains a comma
separated list of keys corresponding to the pickled values.
'''
ret = {}
pickled_keys = []
for key, val in items.items():
if isinstance(val, basestring):
ret[key] = val
else:
pickled_keys.append(key)
ret[key] = pickle.dumps(val)
if pickled_keys:
ret['_pickled'] = ','.join(pickled_keys)
return ret
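# Hedged round-trip example (illustrative only): non-string values are
# pickled and recorded under '_pickled' so unpickle_dict() below can
# restore them.
# >>> d = pickle_dict({'name': 'foo', 'count': 3})
# >>> sorted(d.keys())
# ['_pickled', 'count', 'name']
# >>> unpickle_dict(d) == {'name': 'foo', 'count': 3}
# True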
def unpickle_args(items):
'''Takes a dict and unpickles values whose keys are found in
'_pickled' key.
    >>> unpickle_args({'_pickled': ['foo'], 'foo': ['I3%0A.']})
    {'foo': [3]}
'''
# Technically there can be more than one _pickled value. At this point
# we'll just use the first one
    pickled = items.pop('_pickled', None)
if pickled is None:
return items
pickled_keys = pickled[0].split(',')
ret = {}
for key, vals in items.items():
if key in pickled_keys:
ret[key] = [pickle.loads(val) for val in vals]
else:
ret[key] = vals
return ret
def unpickle_dict(items):
'''Returns a dict pickled with pickle_dict'''
pickled_keys = items.pop('_pickled', '').split(',')
ret = {}
for key, val in items.items():
if key in pickled_keys:
ret[key] = pickle.loads(val)
else:
ret[key] = val
return ret
def download_page(url, data=None):
'''Returns the response for the given url. The optional data argument is
passed directly to urlopen.'''
conn = urllib2.urlopen(url, data)
resp = conn.read()
conn.close()
return resp
_hextochr = dict(('%02x' % i, chr(i)) for i in range(256))
_hextochr.update(('%02X' % i, chr(i)) for i in range(256))
def unhex(inp):
    '''unhex(r'abc\x20def') -> 'abc def'.'''
res = inp.split(r'\x')
for i in xrange(1, len(res)):
item = res[i]
try:
res[i] = _hextochr[item[:2]] + item[2:]
except KeyError:
res[i] = '%' + item
except UnicodeDecodeError:
res[i] = unichr(int(item[:2], 16)) + item[2:]
return ''.join(res)
| gpl-3.0 |
maikelwever/glances | glances/plugins/glances_ip.py | 10 | 3785 | # -*- coding: utf-8 -*-
#
# This file is part of Glances.
#
# Copyright (C) 2015 Nicolargo <[email protected]>
#
# Glances is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Glances is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""IP plugin."""
# Import system libs
try:
import netifaces
netifaces_tag = True
except ImportError:
netifaces_tag = False
# Import Glances libs
from glances.core.glances_logging import logger
from glances.plugins.glances_plugin import GlancesPlugin
class Plugin(GlancesPlugin):
"""Glances IP Plugin.
stats is a dict
"""
def __init__(self, args=None):
"""Init the plugin."""
GlancesPlugin.__init__(self, args=args)
# We want to display the stat in the curse interface
self.display_curse = True
# Init the stats
self.reset()
def reset(self):
"""Reset/init the stats."""
self.stats = {}
@GlancesPlugin._log_result_decorator
def update(self):
"""Update IP stats using the input method.
Stats is dict
"""
# Reset stats
self.reset()
if self.input_method == 'local' and netifaces_tag:
# Update stats using the netifaces lib
try:
default_gw = netifaces.gateways()['default'][netifaces.AF_INET]
except KeyError:
logger.debug("Can not grab the default gateway")
else:
try:
self.stats['address'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['addr']
self.stats['mask'] = netifaces.ifaddresses(default_gw[1])[netifaces.AF_INET][0]['netmask']
self.stats['mask_cidr'] = self.ip_to_cidr(self.stats['mask'])
self.stats['gateway'] = netifaces.gateways()['default'][netifaces.AF_INET][0]
except KeyError as e:
logger.debug("Can not grab IP information (%s)".format(e))
elif self.input_method == 'snmp':
# Not implemented yet
pass
# Update the view
self.update_views()
return self.stats
def update_views(self):
"""Update stats views."""
# Call the father's method
GlancesPlugin.update_views(self)
# Add specifics informations
# Optional
for key in self.stats.keys():
self.views[key]['optional'] = True
def msg_curse(self, args=None):
"""Return the dict to display in the curse interface."""
# Init the return message
ret = []
# Only process if stats exist and display plugin enable...
if not self.stats or args.disable_ip:
return ret
# Build the string message
msg = ' - '
ret.append(self.curse_add_line(msg))
msg = 'IP '
ret.append(self.curse_add_line(msg, 'TITLE'))
msg = '{0:}/{1}'.format(self.stats['address'], self.stats['mask_cidr'])
ret.append(self.curse_add_line(msg))
return ret
@staticmethod
def ip_to_cidr(ip):
"""Convert IP address to CIDR.
Example: '255.255.255.0' will return 24
"""
return sum(map(lambda x: int(x) << 8, ip.split('.'))) // 8128
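        # Hedged illustration (not part of the original plugin): an equivalent,
        # more explicit conversion counts the set bits of the mask, e.g.
        #   sum(bin(int(octet)).count('1') for octet in '255.255.255.0'.split('.'))
        # which also yields 24.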
| lgpl-3.0 |
dandxy89/rf_helicopter | Model/Q_Learning_Agent.py | 1 | 15899 | # Purpose: Agent uses the Q-Learning Algorithm to Interact with the Environment
#
# Info: Class that Implements the Q-Learning Algorithm
#
# Developed as part of the Software Agents Course at City University
#
# Dev: Dan Dixey and Enrico Lopedoto
#
#
import logging
import os
import pickle
from random import choice, random
import numpy as np
try:
from keras.layers.convolutional import Convolution1D, MaxPooling1D
from keras.layers.core import Dense, Dropout, Activation, Flatten
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM
from keras.models import Sequential
from keras.optimizers import RMSprop, Adadelta
except Exception:
logging.warning(
'Unable to Import Deep Learning Modules - Are they installed?')
pass
class Q_Learning_Algorithm:
"""
Basic Implementation of the Q-Learning Algorithm
"""
def __init__(self, settings=None):
"""
:param settings: dictionary of settings
"""
assert settings is not None, 'Pass the Settings'
self.q = {}
self.directory = os.path.join(os.getcwd(), 'Model/NN_Model/')
self.actions = range(settings['nb_actions'])
self.alpha = settings['alpha']
self.epsilon = settings['epsilon']
self.gamma = settings['gamma']
self.train = settings['train']
def get_Qvalue(self, state, action):
return self.q.get((state, action), 0.0)
def learnQ(self, state, action, reward, value):
old_value = self.q.get((state, action), None)
if old_value is None:
self.q[(state, action)] = reward
else:
self.q[(state, action)] = old_value + \
self.alpha * (value - old_value)
def choose_Action(self, state):
if self.train:
if random() < self.epsilon:
action = choice(self.actions)
else:
q = [self.get_Qvalue(state, a) for a in self.actions]
maxQ = max(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[
i] == maxQ]
i = choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
else:
q = [self.get_Qvalue(state, a) for a in self.actions]
maxQ = max(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
return action
def learn(self, state1, action1, reward, state2):
maxqnew = max([self.get_Qvalue(state2, a) for a in self.actions])
self.learnQ(state1, action1, reward, reward + self.gamma * maxqnew)
def save_model(self, name):
"""
Saves the Dictionary to a Pickle
:param name: str
:return: None
"""
output = open(self.directory + name + '.pkl', 'wb')
pickle.dump(self.q, output)
output.close()
def load_model(self, name):
"""
Loads the Dictionary into the Class
:param name: str
:return: None
"""
pkl_file = open(self.directory + name + '.pkl', 'rb')
self.q = pickle.load(pkl_file)
pkl_file.close()
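# Hedged usage sketch (illustrative only; the settings keys mirror the
# constructor above, while the environment hooks and loop bound are
# hypothetical):
#   agent = Q_Learning_Algorithm(settings=dict(nb_actions=5, alpha=0.5,
#                                              epsilon=0.1, gamma=0.9,
#                                              train=True))
#   state = env.reset()                       # hypothetical environment
#   for step in range(n_steps):               # hypothetical loop bound
#       action = agent.choose_Action(state)
#       new_state, reward = env.step(action)  # hypothetical environment
#       agent.learn(state, action, reward, new_state)
#       state = new_state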
class Q_Learning_Epsilon_Decay:
"""
    Epsilon decay - over time the agent explores less and relies more on its learned Q-values
"""
def __init__(self, settings=None):
"""
:param settings: dictionary of settings
"""
assert settings is not None, 'Pass the Settings'
self.q = {}
self.directory = os.path.join(os.getcwd(), 'Model/NN_Model/')
self.actions = range(settings['nb_actions'])
self.alpha = settings['alpha']
self.epsilon = settings['epsilon']
self.gamma = settings['gamma']
self.decay = settings['epsilon_decay']
self.rate = settings['epsilon_action']
self.action_count = 0
self.train = settings['train']
def get_Qvalue(self, state, action):
"""
:param state: tuple
:param action: int
:return: Q-value (int)
"""
return self.q.get((state, action), 0.0)
def learnQ(self, state, action, reward, value):
old_value = self.q.get((state, action), None)
if old_value is None:
self.q[(state, action)] = reward
else:
self.q[(state, action)] = old_value + \
self.alpha * (value - old_value)
def choose_Action(self, state):
"""
:param state: tuple
:return: action value (int)
"""
self.learn_decay()
if self.train:
if random() < self.epsilon:
action = choice(self.actions)
else:
q = [self.get_Qvalue(state, a) for a in self.actions]
maxQ = max(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[
i] == maxQ]
i = choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
else:
q = [self.get_Qvalue(state, a) for a in self.actions]
maxQ = max(q)
count = q.count(maxQ)
if count > 1:
best = [i for i in range(len(self.actions)) if q[i] == maxQ]
i = choice(best)
else:
i = q.index(maxQ)
action = self.actions[i]
self.action_count += 1
return action
def learn(self, state1, action1, reward, state2):
maxqnew = max([self.get_Qvalue(state2, a) for a in self.actions])
self.learnQ(state1, action1, reward, reward + self.gamma * maxqnew)
def learn_decay(self):
if self.action_count % self.rate == 0 and self.action_count > 0:
self.epsilon = self.epsilon * self.decay
def save_model(self, name):
"""
Saves the Dictionary to a Pickle
:param name: str
:return: None
"""
output = open(self.directory + name + '.pkl', 'wb')
pickle.dump(self.q, output)
output.close()
def load_model(self, name):
"""
Loads the Dictionary into the Class
:param name: str
:return: None
"""
pkl_file = open(self.directory + name + '.pkl', 'rb')
self.q = pickle.load(pkl_file)
pkl_file.close()
class Q_Neural_Network:
"""
    Deep Q-Learning (DQN) -> Embedding - CNN - Dense - Linear Output
"""
def __init__(self, settings=None, track_height=None):
"""
:param settings: dictionary of settings
:param track_height: int
"""
assert settings is not None, 'Pass the Settings'
assert track_height is not None, 'Pass the track height'
self.q = {}
self.actions = range(settings['nb_actions'])
self.alpha = settings['alpha']
self.epsilon = settings['epsilon']
self.gamma = settings['gamma']
self.max_track = track_height
self.train = settings['train']
self.observations = []
self.directory = os.path.join(os.getcwd(), 'Model/NN_Model/')
# Neural Network Parameters
config = self.config()
self.batch_size = config['batch_size']
self.dropout = config['dropout']
self.neurons = config['hidden_units']
self.embedding_size = config['embedding_size']
self.input_dim = config['input_dim']
self.filter_length = config['filter_length']
self.nb_filter = config['nb_filter']
self.pool_length = config['pool_length']
self.obs_size = config['obs_size']
self.update_rate = config['update_rate']
self.old_state_m1 = None
self.action_m1 = None
self.reward_m1 = None
self.model = self.create_neural_network_rnn
self.updates = 0
def config(self):
"""
Neural Network (RNN) Configuration Settings
:return: dict
"""
c = dict(batch_size=8,
dropout=0.2,
hidden_units=120,
obs_size=15000,
embedding_size=120,
input_dim=30,
filter_length=17,
nb_filter=150,
pool_length=2,
update_rate=400)
return c
@property
def create_neural_network_rnn(self):
"""
Create the Neural Network Model
        :return: Keras Model
"""
model = Sequential()
# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(12, # Number of Features from State Space
300, # Vector Size
input_length=self.input_dim))
# we add a Convolution1D, which will learn nb_filter
# word group filters of size filter_length:
model.add(Convolution1D(nb_filter=self.nb_filter,
filter_length=self.filter_length,
border_mode='valid',
activation='relu',
subsample_length=1))
# we use standard max pooling (halving the output of the previous
# layer):
model.add(MaxPooling1D(pool_length=self.pool_length))
model.add(Dropout(self.dropout))
# We flatten the output of the conv layer,
# so that we can add a vanilla dense layer:
model.add(Flatten())
# We add a vanilla hidden layer:
model.add(Dense(self.neurons))
model.add(Dropout(self.dropout))
model.add(Activation('relu'))
# We project onto a single unit output layer, and squash it with a
# sigmoid:
model.add(Dense(len(self.actions)))
model.add(Activation('linear'))
model.compile(loss='mse',
optimizer=Adadelta(lr=0.00025))
print(model.summary())
return model
def choose_Action(
self,
state=None,
pstate=None,
paction=None,
preward=None):
"""
:param state: tuple
:param pstate: tuple
:param paction: int
:param preward: int
:return: action value (int)
"""
if self.train:
if random() < self.epsilon or len(
self.observations) < self.obs_size or pstate is None:
action = np.random.randint(0, len(self.actions))
else:
state = np.concatenate(
(list(pstate), list(state))) + 1
state = np.asarray(state).reshape(1, self.input_dim)
qval = self.model.predict(state, batch_size=1)
action = (np.argmax(qval))
else:
if self.updates == 0 or pstate is None:
action = np.random.randint(0, len(self.actions))
self.updates += 1
else:
state = np.concatenate(
(list(pstate), list(state))) + 1
state = np.asarray(state).reshape(1, self.input_dim)
qval = self.model.predict(state, batch_size=1)
action = (np.argmax(qval))
return action
def update_train(self, p_state, action, p_reward, new_state, terminal):
"""
        :param p_state: previous state (tuple)
        :param action: action taken in the previous state (int)
        :param p_reward: reward received for that action (int)
        :param new_state: resulting state (tuple)
        :param terminal: list of terminal reward values
"""
self.observations.append((p_state, action, p_reward, new_state))
self.updates += 1
if self.updates % self.update_rate == 0 and self.updates > 0:
old = self.epsilon
self.epsilon = self.epsilon * (1 - 1e-4)
logging.info(
'Changing epsilon from {:.5f} to {:.5f}'.format(
old, self.epsilon))
# Train Model once enough history and every seven actions...
if len(
self.observations) >= self.obs_size and self.updates % self.update_rate == 0:
X_train, y_train = self.process_minibatch(terminal)
self.model.fit(X_train,
y_train,
batch_size=self.batch_size,
nb_epoch=1,
verbose=1,
shuffle=True)
if random() < 0.45:
self.save_model(name='TempModel')
def process_minibatch(self, terminal_rewards):
"""
Creates Training and Labels Arrays
:param terminal_rewards: list(2x int)
:return: tuple(np.array) (training data and labels)
"""
X_train = []
y_train = []
val = 0
for memory in self.observations:
if val == 0:
val += 1
old_state_m1, action_m1, reward_m1, new_state_m1 = memory
else:
# Get stored values.
old_state_m, action_m, reward_m, new_state_m = memory
# Get prediction on old state.
input = np.concatenate(
(list(old_state_m1), list(old_state_m))) + 1
old_state_m = input.reshape(1, self.input_dim)
old_qval = self.model.predict(old_state_m,
batch_size=1,
verbose=0)
# Get prediction on new state.
input2 = np.concatenate((old_state_m[
0][-15:], list(new_state_m))) + 1
new_state_m = input2.reshape(1, self.input_dim)
newQ = self.model.predict(new_state_m,
batch_size=1,
verbose=0)
maxQ = np.max(newQ)
y = np.zeros((1, len(self.actions)))
y[:] = old_qval[:]
# Check for terminal state.
if reward_m not in terminal_rewards:
update = (reward_m + (self.gamma * maxQ))
else:
update = reward_m
y[0][action_m] = update
X_train.append(old_state_m.reshape(self.input_dim,))
y_train.append(y.reshape(len(self.actions),))
self.old_state_m1, self.action_m1, self.reward_m1, new_state_m1 = memory
# Generate Numpy Arrays
X_train = np.array(X_train)
y_train = np.array(y_train)
return X_train, y_train
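        # Hedged numeric illustration (not from the original code): with
        # gamma = 0.9, an observed reward of 1 and a best next-state estimate
        # maxQ = 2.0, the non-terminal target above is 1 + 0.9 * 2.0 = 2.8,
        # and only the entry for the action actually taken is overwritten in
        # the label vector y.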
def save_model(self, name):
"""
Save the Neural Network Model
:param name: string (save name)
:return: None
"""
json_string = self.model.to_json()
open(
self.directory +
name +
'_architecture.json',
'w').write(json_string)
self.model.save_weights(self.directory + name + '_weights.h5',
overwrite=True)
logging.info('Model Saved!')
def load_model(self, name):
"""
load Keras model from JSON and weights
:param name: str
:return: None (Loads to Self)
"""
from keras.models import model_from_json
self.model = model_from_json(
open(
self.directory +
name +
'_architecture.json').read())
self.model.load_weights(self.directory + name + '_weights.h5')
logging.info('Model Loaded!')
| mit |
Juzley/grpc | src/python/src/grpc/framework/face/testing/base_util.py | 5 | 3776 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utilities for creating Base-layer objects for use in Face-layer tests."""
import abc
# interfaces is referenced from specification in this module.
from grpc.framework.base import util as _base_util
from grpc.framework.base.packets import implementations
from grpc.framework.base.packets import in_memory
from grpc.framework.base.packets import interfaces # pylint: disable=unused-import
from grpc.framework.foundation import logging_pool
_POOL_SIZE_LIMIT = 20
_MAXIMUM_TIMEOUT = 90
class LinkedPair(object):
"""A Front and Back that are linked to one another.
Attributes:
front: An interfaces.Front.
back: An interfaces.Back.
"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def shut_down(self):
"""Shuts down this object and releases its resources."""
raise NotImplementedError()
class _LinkedPair(LinkedPair):
def __init__(self, front, back, pools):
self.front = front
self.back = back
self._pools = pools
def shut_down(self):
_base_util.wait_for_idle(self.front)
_base_util.wait_for_idle(self.back)
for pool in self._pools:
pool.shutdown(wait=True)
def linked_pair(servicer, default_timeout):
"""Creates a Server and Stub linked together for use."""
link_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
front_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
front_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
front_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
back_work_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
back_transmission_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
back_utility_pool = logging_pool.pool(_POOL_SIZE_LIMIT)
pools = (
link_pool,
front_work_pool, front_transmission_pool, front_utility_pool,
back_work_pool, back_transmission_pool, back_utility_pool)
link = in_memory.Link(link_pool)
front = implementations.front(
front_work_pool, front_transmission_pool, front_utility_pool)
back = implementations.back(
servicer, back_work_pool, back_transmission_pool, back_utility_pool,
default_timeout, _MAXIMUM_TIMEOUT)
front.join_rear_link(link)
link.join_fore_link(front)
back.join_fore_link(link)
link.join_rear_link(back)
return _LinkedPair(front, back, pools)
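# Hedged usage sketch (``my_servicer`` is an assumed interfaces.Servicer
# implementation supplied by the calling test):
#   pair = linked_pair(my_servicer, default_timeout=10)
#   try:
#       ...  # drive pair.front / pair.back from a Face-layer test
#   finally:
#       pair.shut_down()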
| bsd-3-clause |
nikolas/edx-platform | openedx/core/lib/tests/test_cache_utils.py | 144 | 2107 | """
Tests for cache_utils.py
"""
import ddt
from mock import MagicMock
from unittest import TestCase
from openedx.core.lib.cache_utils import memoize_in_request_cache
@ddt.ddt
class TestMemoizeInRequestCache(TestCase):
"""
Test the memoize_in_request_cache helper function.
"""
class TestCache(object):
"""
A test cache that provides a data dict for caching values, analogous to the request_cache.
"""
def __init__(self):
self.data = {}
def setUp(self):
super(TestMemoizeInRequestCache, self).setUp()
self.request_cache = self.TestCache()
@memoize_in_request_cache('request_cache')
def func_to_memoize(self, param):
"""
A test function whose results are to be memoized in the request_cache.
"""
return self.func_to_count(param)
@memoize_in_request_cache('request_cache')
def multi_param_func_to_memoize(self, param1, param2):
"""
A test function with multiple parameters whose results are to be memoized in the request_cache.
"""
return self.func_to_count(param1, param2)
def test_memoize_in_request_cache(self):
"""
Tests the memoize_in_request_cache decorator for both single-param and multiple-param functions.
"""
funcs_to_test = (
(self.func_to_memoize, ['foo'], ['bar']),
(self.multi_param_func_to_memoize, ['foo', 'foo2'], ['foo', 'foo3']),
)
for func_to_memoize, arg_list1, arg_list2 in funcs_to_test:
self.func_to_count = MagicMock() # pylint: disable=attribute-defined-outside-init
self.assertFalse(self.func_to_count.called)
func_to_memoize(*arg_list1)
self.func_to_count.assert_called_once_with(*arg_list1)
func_to_memoize(*arg_list1)
self.func_to_count.assert_called_once_with(*arg_list1)
for _ in range(10):
func_to_memoize(*arg_list1)
func_to_memoize(*arg_list2)
self.assertEquals(self.func_to_count.call_count, 2)
| agpl-3.0 |
justyns/home-assistant | homeassistant/components/thermostat/demo.py | 3 | 2491 | """
Demo platform that offers a fake thermostat.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
from homeassistant.components.thermostat import ThermostatDevice
from homeassistant.const import TEMP_CELCIUS, TEMP_FAHRENHEIT
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup the Demo thermostats."""
add_devices([
DemoThermostat("Nest", 21, TEMP_CELCIUS, False, 19, False),
DemoThermostat("Thermostat", 68, TEMP_FAHRENHEIT, True, 77, True),
])
# pylint: disable=too-many-arguments
class DemoThermostat(ThermostatDevice):
"""Representation of a demo thermostat."""
def __init__(self, name, target_temperature, unit_of_measurement,
away, current_temperature, is_fan_on):
"""Initialize the thermostat."""
self._name = name
self._target_temperature = target_temperature
self._unit_of_measurement = unit_of_measurement
self._away = away
self._current_temperature = current_temperature
self._is_fan_on = is_fan_on
@property
def should_poll(self):
"""No polling needed for a demo thermostat."""
return False
@property
def name(self):
"""Return the name of the thermostat."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def is_away_mode_on(self):
"""Return if away mode is on."""
return self._away
@property
def is_fan_on(self):
"""Return true if the fan is on."""
return self._is_fan_on
def set_temperature(self, temperature):
"""Set new target temperature."""
self._target_temperature = temperature
def turn_away_mode_on(self):
"""Turn away mode on."""
self._away = True
def turn_away_mode_off(self):
"""Turn away mode off."""
self._away = False
def turn_fan_on(self):
"""Turn fan on."""
self._is_fan_on = True
def turn_fan_off(self):
"""Turn fan off."""
self._is_fan_on = False
| mit |
michelts/lettuce | tests/integration/lib/Django-1.2.5/tests/regressiontests/cache/tests.py | 38 | 28208 | # -*- coding: utf-8 -*-
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import os
import tempfile
import time
import unittest
import warnings
from django.conf import settings
from django.core import management
from django.core.cache import get_cache
from django.core.cache.backends.base import CacheKeyWarning
from django.http import HttpResponse, HttpRequest
from django.middleware.cache import FetchFromCacheMiddleware, UpdateCacheMiddleware
from django.test import TestCase
from django.test.utils import get_warnings_state, restore_warnings_state
from django.utils import translation
from django.utils.cache import patch_vary_headers, get_cache_key, learn_cache_key
from django.utils.hashcompat import md5_constructor
from regressiontests.cache.models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class DummyCacheTests(unittest.TestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has different test requirements.
def setUp(self):
self.cache = get_cache('dummy://')
def test_simple(self):
"Dummy cache backend ignores cache set calls"
self.cache.set("key", "value")
self.assertEqual(self.cache.get("key"), None)
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertEqual(result, True)
self.assertEqual(self.cache.get("addkey1"), None)
def test_non_existent(self):
"Non-existent keys aren't found in the dummy cache backend"
self.assertEqual(self.cache.get("does_not_exist"), None)
self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
self.cache.set('a', 'a')
self.cache.set('b', 'b')
self.cache.set('c', 'c')
self.cache.set('d', 'd')
self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {})
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.assertEqual(self.cache.get("key1"), None)
self.cache.delete("key1")
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
self.cache.set("hello1", "goodbye1")
self.assertEqual(self.cache.has_key("hello1"), False)
self.assertEqual(self.cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
self.cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in self.cache, False)
self.assertEqual("goodbye2" in self.cache, False)
def test_incr(self):
"Dummy cache values can't be incremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.incr, 'answer')
self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
def test_decr(self):
"Dummy cache values can't be decremented"
self.cache.set('answer', 42)
self.assertRaises(ValueError, self.cache.decr, 'answer')
self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string' : 'this is a string',
'int' : 42,
'list' : [1, 2, 3, 4],
'tuple' : (1, 2, 3, 4),
'dict' : {'A': 1, 'B' : 2},
'function' : f,
'class' : C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), None)
def test_expiration(self):
"Expiration has no effect on the dummy cache"
self.cache.set('expire1', 'very quickly', 1)
self.cache.set('expire2', 'very quickly', 1)
self.cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(self.cache.get("expire1"), None)
self.cache.add("expire2", "newvalue")
self.assertEqual(self.cache.get("expire2"), None)
self.assertEqual(self.cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
u'ascii': u'ascii_value',
u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
u'ascii': {u'x' : 1 }
}
for (key, value) in stuff.items():
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), None)
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.cache.set_many({'a': 1, 'b': 2})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
self.cache.delete_many(['a', 'b'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
self.cache.clear()
class BaseCacheTests(object):
# A common set of tests to apply to all cache backends
def tearDown(self):
self.cache.clear()
def test_simple(self):
# Simple cache set/get works
self.cache.set("key", "value")
self.assertEqual(self.cache.get("key"), "value")
def test_add(self):
# A key can be added to a cache
self.cache.add("addkey1", "value")
result = self.cache.add("addkey1", "newvalue")
self.assertEqual(result, False)
self.assertEqual(self.cache.get("addkey1"), "value")
def test_non_existent(self):
# Non-existent cache keys return as None/default
# get with non-existent keys
self.assertEqual(self.cache.get("does_not_exist"), None)
self.assertEqual(self.cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
self.cache.set('a', 'a')
self.cache.set('b', 'b')
self.cache.set('c', 'c')
self.cache.set('d', 'd')
self.assertEqual(self.cache.get_many(['a', 'c', 'd']), {'a' : 'a', 'c' : 'c', 'd' : 'd'})
self.assertEqual(self.cache.get_many(['a', 'b', 'e']), {'a' : 'a', 'b' : 'b'})
def test_delete(self):
# Cache keys can be deleted
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.assertEqual(self.cache.get("key1"), "spam")
self.cache.delete("key1")
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), "eggs")
def test_has_key(self):
# The cache can be inspected for cache keys
self.cache.set("hello1", "goodbye1")
self.assertEqual(self.cache.has_key("hello1"), True)
self.assertEqual(self.cache.has_key("goodbye1"), False)
def test_in(self):
        # The in operator can be used to inspect cache contents
self.cache.set("hello2", "goodbye2")
self.assertEqual("hello2" in self.cache, True)
self.assertEqual("goodbye2" in self.cache, False)
def test_incr(self):
# Cache values can be incremented
self.cache.set('answer', 41)
self.assertEqual(self.cache.incr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.incr('answer', 10), 52)
self.assertEqual(self.cache.get('answer'), 52)
self.assertRaises(ValueError, self.cache.incr, 'does_not_exist')
def test_decr(self):
# Cache values can be decremented
self.cache.set('answer', 43)
self.assertEqual(self.cache.decr('answer'), 42)
self.assertEqual(self.cache.get('answer'), 42)
self.assertEqual(self.cache.decr('answer', 10), 32)
self.assertEqual(self.cache.get('answer'), 32)
self.assertRaises(ValueError, self.cache.decr, 'does_not_exist')
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string' : 'this is a string',
'int' : 42,
'list' : [1, 2, 3, 4],
'tuple' : (1, 2, 3, 4),
'dict' : {'A': 1, 'B' : 2},
'function' : f,
'class' : C,
}
self.cache.set("stuff", stuff)
self.assertEqual(self.cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
self.cache.set('question', my_poll)
cached_poll = self.cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
self.cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
# Don't want fields with callable as default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cached_polls = self.cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
self.cache.set('expire1', 'very quickly', 1)
self.cache.set('expire2', 'very quickly', 1)
self.cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertEqual(self.cache.get("expire1"), None)
self.cache.add("expire2", "newvalue")
self.assertEqual(self.cache.get("expire2"), "newvalue")
self.assertEqual(self.cache.has_key("expire3"), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
u'ascii': u'ascii_value',
u'unicode_ascii': u'Iñtërnâtiônàlizætiøn1',
u'Iñtërnâtiônàlizætiøn': u'Iñtërnâtiônàlizætiøn2',
u'ascii': {u'x' : 1 }
}
for (key, value) in stuff.items():
self.cache.set(key, value)
self.assertEqual(self.cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cachable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value)
self.cache.set('binary1', compressed_value)
compressed_result = self.cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result))
def test_set_many(self):
# Multiple keys can be set using set_many
self.cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(self.cache.get("key1"), "spam")
self.assertEqual(self.cache.get("key2"), "eggs")
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
self.cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.set("key3", "ham")
self.cache.delete_many(["key1", "key2"])
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
self.assertEqual(self.cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
self.cache.set("key1", "spam")
self.cache.set("key2", "eggs")
self.cache.clear()
self.assertEqual(self.cache.get("key1"), None)
self.assertEqual(self.cache.get("key2"), None)
def test_long_timeout(self):
'''
Using a timeout greater than 30 days makes memcached think
it is an absolute expiration timestamp instead of a relative
offset. Test that we honour this convention. Refs #12399.
'''
self.cache.set('key1', 'eggs', 60*60*24*30 + 1) #30 days + 1 second
self.assertEqual(self.cache.get('key1'), 'eggs')
self.cache.add('key2', 'ham', 60*60*24*30 + 1)
self.assertEqual(self.cache.get('key2'), 'ham')
self.cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60*60*24*30 + 1)
self.assertEqual(self.cache.get('key3'), 'sausage')
self.assertEqual(self.cache.get('key4'), 'lobster bisque')
def perform_cull_test(self, initial_count, final_count):
"""This is implemented as a utility method, because only some of the backends
implement culling. The culling algorithm also varies slightly, so the final
number of entries will vary between backends"""
# Create initial cache key entries. This will overflow the cache, causing a cull
for i in range(1, initial_count):
self.cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if self.cache.has_key('cull%d' % i):
count = count + 1
self.assertEqual(count, final_count)
def test_invalid_keys(self):
"""
All the builtin backends (except memcached, see below) should warn on
keys that would be refused by memcached. This encourages portable
caching code without making it too difficult to use production backends
with more liberal key rules. Refs #6447.
"""
# On Python 2.6+ we could use the catch_warnings context
# manager to test this warning nicely. Since we can't do that
# yet, the cleanest option is to temporarily ask for
# CacheKeyWarning to be raised as an exception.
_warnings_state = get_warnings_state()
warnings.simplefilter("error", CacheKeyWarning)
try:
# memcached does not allow whitespace or control characters in keys
self.assertRaises(CacheKeyWarning, self.cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(CacheKeyWarning, self.cache.set, 'a' * 251, 'value')
finally:
restore_warnings_state(_warnings_state)
class DBCacheTests(unittest.TestCase, BaseCacheTests):
def setUp(self):
# Spaces are used in the table name to ensure quoting/escaping is working
self._table_name = 'test cache table'
management.call_command('createcachetable', self._table_name, verbosity=0, interactive=False)
self.cache = get_cache('db://%s?max_entries=30' % self._table_name)
def tearDown(self):
from django.db import connection
cursor = connection.cursor()
cursor.execute('DROP TABLE %s' % connection.ops.quote_name(self._table_name))
def test_cull(self):
self.perform_cull_test(50, 29)
class LocMemCacheTests(unittest.TestCase, BaseCacheTests):
def setUp(self):
self.cache = get_cache('locmem://?max_entries=30')
def test_cull(self):
self.perform_cull_test(50, 29)
# memcached backend isn't guaranteed to be available.
# To check the memcached backend, the test settings file will
# need to contain a CACHE_BACKEND setting that points at
# your memcache server.
if settings.CACHE_BACKEND.startswith('memcached://'):
class MemcachedCacheTests(unittest.TestCase, BaseCacheTests):
def setUp(self):
self.cache = get_cache(settings.CACHE_BACKEND)
def test_invalid_keys(self):
"""
On memcached, we don't introduce a duplicate key validation
step (for speed reasons), we just let the memcached API
library raise its own exception on bad keys. Refs #6447.
In order to be memcached-API-library agnostic, we only assert
that a generic exception of some kind is raised.
"""
# memcached does not allow whitespace or control characters in keys
self.assertRaises(Exception, self.cache.set, 'key with spaces', 'value')
# memcached limits key length to 250
self.assertRaises(Exception, self.cache.set, 'a' * 251, 'value')
class FileBasedCacheTests(unittest.TestCase, BaseCacheTests):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
self.dirname = tempfile.mkdtemp()
self.cache = get_cache('file://%s?max_entries=30' % self.dirname)
def test_hashing(self):
"""Test that keys are hashed into subdirectories correctly"""
self.cache.set("foo", "bar")
keyhash = md5_constructor("foo").hexdigest()
keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
self.assert_(os.path.exists(keypath))
def test_subdirectory_removal(self):
"""
Make sure that the created subdirectories are correctly removed when empty.
"""
self.cache.set("foo", "bar")
keyhash = md5_constructor("foo").hexdigest()
keypath = os.path.join(self.dirname, keyhash[:2], keyhash[2:4], keyhash[4:])
self.assert_(os.path.exists(keypath))
self.cache.delete("foo")
self.assert_(not os.path.exists(keypath))
self.assert_(not os.path.exists(os.path.dirname(keypath)))
self.assert_(not os.path.exists(os.path.dirname(os.path.dirname(keypath))))
def test_cull(self):
self.perform_cull_test(50, 28)
class CustomCacheKeyValidationTests(unittest.TestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
cache = get_cache('regressiontests.cache.liberal_backend://')
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
class CacheUtils(unittest.TestCase):
"""TestCase for django.utils.cache functions."""
def setUp(self):
self.path = '/cache/test/'
self.old_settings_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.old_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
self.orig_use_i18n = settings.USE_I18N
settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
settings.CACHE_MIDDLEWARE_SECONDS = 1
settings.USE_I18N = False
def tearDown(self):
settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.old_settings_key_prefix
settings.CACHE_MIDDLEWARE_SECONDS = self.old_middleware_seconds
settings.USE_I18N = self.orig_use_i18n
def _get_request(self, path):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = "/cache/%s" % path
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
response = HttpResponse()
if initial_vary is not None:
response['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self._get_request(self.path)
response = HttpResponse()
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertEqual(get_cache_key(request), None)
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
# Verify that a specified key_prefix is taken in to account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(get_cache_key(request, key_prefix=key_prefix), 'views.decorators.cache.cache_page.localprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
def test_learn_cache_key(self):
request = self._get_request(self.path)
response = HttpResponse()
response['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(get_cache_key(request), 'views.decorators.cache.cache_page.settingsprefix.a8c87a3d8c44853d7f79474f7ffe4ad5.d41d8cd98f00b204e9800998ecf8427e')
class CacheI18nTest(unittest.TestCase):
def setUp(self):
self.orig_cache_middleware_seconds = settings.CACHE_MIDDLEWARE_SECONDS
self.orig_cache_middleware_key_prefix = settings.CACHE_MIDDLEWARE_KEY_PREFIX
self.orig_cache_backend = settings.CACHE_BACKEND
self.orig_use_i18n = settings.USE_I18N
self.orig_languages = settings.LANGUAGES
settings.LANGUAGES = (
('en', 'English'),
('es', 'Spanish'),
)
settings.CACHE_MIDDLEWARE_KEY_PREFIX = 'settingsprefix'
self.path = '/cache/test/'
def tearDown(self):
settings.CACHE_MIDDLEWARE_SECONDS = self.orig_cache_middleware_seconds
settings.CACHE_MIDDLEWARE_KEY_PREFIX = self.orig_cache_middleware_key_prefix
settings.CACHE_BACKEND = self.orig_cache_backend
settings.USE_I18N = self.orig_use_i18n
settings.LANGUAGES = self.orig_languages
translation.deactivate()
def _get_request(self):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = self.path
return request
def _get_request_cache(self):
request = HttpRequest()
request.META = {
'SERVER_NAME': 'testserver',
'SERVER_PORT': 80,
}
request.path = request.path_info = self.path
request._cache_update_cache = True
request.method = 'GET'
request.session = {}
return request
def test_cache_key_i18n(self):
settings.USE_I18N = True
request = self._get_request()
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertTrue(key.endswith(lang), "Cache keys should include the language name when i18n is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
    def test_cache_key_no_i18n(self):
settings.USE_I18N = False
request = self._get_request()
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertFalse(key.endswith(lang), "Cache keys shouldn't include the language name when i18n is inactive")
def test_middleware(self):
def set_cache(request, lang, msg):
translation.activate(lang)
response = HttpResponse()
            response.content = msg
return UpdateCacheMiddleware().process_response(request, response)
settings.CACHE_MIDDLEWARE_SECONDS = 60
settings.CACHE_MIDDLEWARE_KEY_PREFIX="test"
settings.CACHE_BACKEND='locmem:///'
settings.USE_I18N = True
en_message ="Hello world!"
es_message ="Hola mundo!"
request = self._get_request_cache()
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware().process_request(request)
# Check that we can recover the cache
self.assertNotEqual(get_cache_data.content, None)
self.assertEqual(en_message, get_cache_data.content)
# change the session language and set content
request = self._get_request_cache()
set_cache(request, 'es', es_message)
# change again the language
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, en_message)
# change again the language
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware().process_request(request)
self.assertEqual(get_cache_data.content, es_message)
class CacheMiddlewareAnonymousOnlyTests(TestCase):
urls = 'regressiontests.cache.urls'
def setUp(self):
self._orig_cache_middleware_anonymous_only = \
getattr(settings, 'CACHE_MIDDLEWARE_ANONYMOUS_ONLY', False)
self._orig_middleware_classes = settings.MIDDLEWARE_CLASSES
settings.MIDDLEWARE_CLASSES = list(settings.MIDDLEWARE_CLASSES)
settings.MIDDLEWARE_CLASSES.insert(0, 'django.middleware.cache.UpdateCacheMiddleware')
settings.MIDDLEWARE_CLASSES += ['django.middleware.cache.FetchFromCacheMiddleware']
def tearDown(self):
settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = self._orig_cache_middleware_anonymous_only
settings.MIDDLEWARE_CLASSES = self._orig_middleware_classes
def test_cache_middleware_anonymous_only_does_not_cause_vary_cookie(self):
settings.CACHE_MIDDLEWARE_ANONYMOUS_ONLY = True
response = self.client.get('/')
self.failIf('Cookie' in response.get('Vary', ''))
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
procangroup/edx-platform | common/test/acceptance/fixtures/config.py | 14 | 3153 | """
Fixture to manipulate configuration models.
"""
import json
import re
import requests
from lazy import lazy
from common.test.acceptance.fixtures import LMS_BASE_URL, STUDIO_BASE_URL
class ConfigModelFixtureError(Exception):
"""
Error occurred while configuring the stub XQueue.
"""
pass
class ConfigModelFixture(object):
"""
    Configure a ConfigurationModel by using its JSON API.
"""
def __init__(self, api_base, configuration, platform='lms'):
"""
Configure a ConfigurationModel exposed at `api_base` to have the configuration `configuration`.
"""
self._api_base = api_base
self._configuration = configuration
self._platform = platform
def install(self):
"""
Configure the stub via HTTP.
"""
base_url = STUDIO_BASE_URL if self._platform == 'cms' else LMS_BASE_URL
url = base_url + self._api_base
response = self.session.post(
url,
data=json.dumps(self._configuration),
headers=self.headers,
)
if not response.ok:
raise ConfigModelFixtureError(
"Could not configure url '{}'. response: {} - {}".format(
self._api_base,
response,
response.content,
)
)
@lazy
def session_cookies(self):
"""
Log in as a staff user, then return the cookies for the session (as a dict)
Raises a `ConfigModelFixtureError` if the login fails.
"""
return {key: val for key, val in self.session.cookies.items()}
@lazy
def headers(self):
"""
Default HTTP headers dict.
"""
return {
'Content-type': 'application/json',
'Accept': 'application/json',
'X-CSRFToken': self.session_cookies.get('csrftoken', '')
}
@lazy
def session(self):
"""
Log in as a staff user, then return a `requests` `session` object for the logged in user.
Raises a `StudioApiLoginError` if the login fails.
"""
# Use auto-auth to retrieve the session for a logged in user
session = requests.Session()
response = session.get(LMS_BASE_URL + "/auto_auth?superuser=true")
# Return the session from the request
if response.ok:
# auto_auth returns information about the newly created user
# capture this so it can be used by by the testcases.
user_pattern = re.compile(r'Logged in user {0} \({1}\) with password {2} and user_id {3}'.format(
r'(?P<username>\S+)', r'(?P<email>[^\)]+)', r'(?P<password>\S+)', r'(?P<user_id>\d+)'))
user_matches = re.match(user_pattern, response.text)
if user_matches:
self.user = user_matches.groupdict() # pylint: disable=attribute-defined-outside-init
return session
else:
msg = "Could not log in to use ConfigModel restful API. Status code: {0}".format(response.status_code)
raise ConfigModelFixtureError(msg)
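    # Hedged usage sketch (endpoint and payload below are illustrative
    # assumptions, not part of this module):
    #   ConfigModelFixture('/config/models/example_config', {'enabled': True}).install()
    # posts the JSON payload to the model's API as a logged-in staff user and
    # raises ConfigModelFixtureError if the POST is rejected.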
| agpl-3.0 |
mavit/ansible | test/units/modules/network/nos/nos_module.py | 27 | 2501 | # (c) 2018 Extreme Networks Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
from units.modules.utils import AnsibleExitJson, AnsibleFailJson, ModuleTestCase
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as file_desc:
data = file_desc.read()
try:
data = json.loads(data)
except:
pass
fixture_data[path] = data
return data
class TestNosModule(ModuleTestCase):
def execute_module(self, failed=False, changed=False, commands=None, sort=True, defaults=False):
self.load_fixtures(commands)
if failed:
result = self.failed()
self.assertTrue(result['failed'], result)
else:
result = self.changed(changed)
self.assertEqual(result['changed'], changed, result)
if commands is not None:
if sort:
self.assertEqual(sorted(commands), sorted(result['commands']), result['commands'])
else:
self.assertEqual(commands, result['commands'], result['commands'])
return result
def failed(self):
with self.assertRaises(AnsibleFailJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertTrue(result['failed'], result)
return result
def changed(self, changed=False):
with self.assertRaises(AnsibleExitJson) as exc:
self.module.main()
result = exc.exception.args[0]
self.assertEqual(result['changed'], changed, result)
return result
def load_fixtures(self, commands=None):
pass
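
# Illustrative sketch (not part of the original file). A concrete test case would
# typically subclass TestNosModule, point it at the module under test, and return
# canned device output from load_fixtures(); the module and fixture names below
# are assumptions for demonstration only.
#
#     class TestNosCommandModule(TestNosModule):
#         module = nos_command
#
#         def load_fixtures(self, commands=None):
#             self.run_commands.return_value = [load_fixture('nos_command_show_version')]
#
#         def test_show_version(self):
#             set_module_args(dict(commands=['show version']))
#             self.execute_module(changed=False)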
| gpl-3.0 |
FabienPean/sofa | applications/plugins/SofaPython/python/SofaPython/DAGValidation.py | 11 | 3766 | import sys
import Sofa
import Tools
def MechanicalObjectVisitor(node):
## listing mechanical states, bottom-up from node
ancestors = []
visited = []
for p in node.getParents():
path = p.getPathName()
        if path not in visited:
            state = p.getMechanicalState()
            if state is not None:
ancestors.append( path+"/"+state.name )
ancestors += MechanicalObjectVisitor( p )
return ancestors
class Visitor(object):
## checking that mapping graph is equivalent to node graph
## checking that independent dofs are not under other dofs in the scene graph
def __init__(self):
#print "DAGValidationVisitor"
self.error = []
def treeTraversal(self):
#print 'ValidationVisitor treeTraversal'
return -1 # dag
def processNodeTopDown(self,node):
#print node.name
state = node.getMechanicalState()
if state is None:
return True
mapping = node.getMechanicalMapping()
if mapping is None: #independent dofs
ancestors = MechanicalObjectVisitor(node)
            if len(ancestors) != 0: # an independent dof is under other dofs in the scene graph
err = "ERROR "
err += "mechanical state '"+state.getContext().getPathName()+"/"+state.name+"' is independent (no mapping)"
err += " and should not be in the child node of other mechanical states ("+Tools.listToStr(ancestors)+")"
self.error.append(err)
else: # mapped dofs
#print mapping.getName()
from_dof = mapping.getFrom()
parent_node = mapping.getContext().getParents()
parent_node_path = []
for p in parent_node:
parent_node_path.append( p.getPathName() )
from_node_path = []
for f in from_dof:
from_node_path.append( f.getContext().getPathName() )
#print parent_node_path
for f in from_node_path:
#print f
if not f in parent_node_path:
err = "ERROR "
err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
err += "'"+ f + "' should be a parent node"
self.error.append(err)
#print err
for p in parent_node_path:
#print p
if not p in from_node_path:
err = "ERROR "
err += "'"+mapping.getContext().getPathName()+"/"+mapping.name+"': "
err += "'"+p + "' should NOT be a parent node"
self.error.append(err)
#print err
#print "==================="
return True
def processNodeBottomUp(self,node):
return True
def test( node, silent=False ):
## checking that mapping graph is equivalent to node graph
## checking that independent dofs are not under other dofs in the scene graph
## return a list of errors
if not silent:
print ""
print "====== SofaPython.DAGValidation.test ======================="
print ""
print "Validating scene from node '/" + node.getPathName() + "'..."
vis = Visitor()
node.executeVisitor(vis)
if not silent:
        if len(vis.error) == 0:
print "... VALIDATED"
else:
print "... NOT VALID"
print ""
for e in vis.error:
print e
print ""
print "=============================================================="
sys.stdout.flush()
return vis.error
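
# Illustrative usage sketch (not part of the original module). The validation is
# typically run on the scene root from a SofaPython script once the graph is built;
# the createScene signature below follows the usual SofaPython convention.
#
#     import SofaPython.DAGValidation
#
#     def createScene(root):
#         # ... build nodes, mechanical states and mappings ...
#         errors = SofaPython.DAGValidation.test(root, silent=True)
#         if len(errors) != 0:
#             print "\n".join(errors)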
| lgpl-2.1 |
solintegra/addons | purchase_landed_cost/models/stock_picking.py | 18 | 1336 | # -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import models, api
class StockPicking(models.Model):
_inherit = 'stock.picking'
@api.multi
def action_open_landed_cost(self):
self.ensure_one()
line_obj = self.env['purchase.cost.distribution.line']
lines = line_obj.search([('picking_id', '=', self.id)])
if lines:
mod_obj = self.env['ir.model.data']
model, action_id = tuple(
mod_obj.get_object_reference(
'purchase_landed_cost',
'action_purchase_cost_distribution'))
action = self.env[model].browse(action_id).read()[0]
ids = set([x.distribution.id for x in lines])
if len(ids) == 1:
res = mod_obj.get_object_reference(
'purchase_landed_cost', 'purchase_cost_distribution_form')
action['views'] = [(res and res[1] or False, 'form')]
action['res_id'] = list(ids)[0]
else:
action['domain'] = "[('id', 'in', %s)]" % list(ids)
return action
| agpl-3.0 |
AnhellO/DAS_Sistemas | Ago-Dic-2019/Ricardo_Romero_Medina/Ordinario/buscarLocacion.py | 1 | 1341 | import requests
import ubicacion
import database
class obtener_Locacion():
r = requests.get('https://rickandmortyapi.com/api/location/')
texto = r.json()
paginas = texto['info']['pages']
db = database.basedatos()
db.crear_tabla_locaciones()
for i in range(1,paginas+1):
r = requests.get('https://rickandmortyapi.com/api/location/?page={}'.format(i))
texto = r.json()
location = texto['results']
for locacion in location:
residentes = []
res = locacion['residents']
            if len(res) != 0:
                for cadena in res:
                    # Take the numeric id from the end of the resident URL instead of
                    # relying on fixed character positions, which breaks for longer ids.
                    parte = cadena.rstrip('/').split('/')[-1]
                    residentes.append(parte)
            else:
                parte = 'Sin Residentes'
                residentes.append(parte)
loc = ubicacion.ubicaciones(locacion['id'],locacion['name'],locacion['type'],locacion['dimension'],locacion['residents'],residentes,locacion['url'])
db.insertar_locaciones(loc)
if __name__ == '__main__':
obtener_Locacion() | mit |
indirectlylit/kolibri | kolibri/deployment/default/settings/base.py | 2 | 11441 | # -*- coding: utf-8 -*-
"""
Django settings for kolibri project.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import pytz
from django.conf import locale
from six.moves.urllib.parse import urljoin
from tzlocal import get_localzone
import kolibri
from kolibri.deployment.default.cache import CACHES
from kolibri.plugins.utils.settings import apply_settings
from kolibri.utils import conf
from kolibri.utils import i18n
from kolibri.utils.logger import get_logging_config
try:
isolation_level = None
import psycopg2 # noqa
isolation_level = psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE
except ImportError:
pass
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
# import kolibri, so we can get the path to the module.
# we load other utilities related to i18n
# This is essential! We load the kolibri conf INSIDE the Django conf
KOLIBRI_MODULE_PATH = os.path.dirname(kolibri.__file__)
BASE_DIR = os.path.abspath(os.path.dirname(__name__))
LOCALE_PATHS = [os.path.join(KOLIBRI_MODULE_PATH, "locale")]
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.11/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "f@ey3)y^03r9^@mou97apom*+c1m#b1!cwbm50^s4yk72xce27"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = conf.OPTIONS["Server"]["DEBUG"]
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"kolibri.core",
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"django_filters",
"kolibri.core.auth.apps.KolibriAuthConfig",
"kolibri.core.bookmarks",
"kolibri.core.content",
"kolibri.core.logger",
"kolibri.core.notifications.apps.KolibriNotificationsConfig",
"kolibri.core.tasks.apps.KolibriTasksConfig",
"kolibri.core.deviceadmin",
"kolibri.core.webpack",
"kolibri.core.exams",
"kolibri.core.device",
"kolibri.core.discovery",
"kolibri.core.lessons",
"kolibri.core.analytics",
"rest_framework",
"django_js_reverse",
"jsonfield",
"morango",
]
MIDDLEWARE = [
"kolibri.core.analytics.middleware.cherrypy_access_log_middleware",
"kolibri.core.device.middleware.ProvisioningErrorHandler",
"django.middleware.cache.UpdateCacheMiddleware",
"kolibri.core.analytics.middleware.MetricsMiddleware",
"kolibri.core.auth.middleware.KolibriSessionMiddleware",
"kolibri.core.device.middleware.KolibriLocaleMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"kolibri.core.auth.middleware.CustomAuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
"django.middleware.security.SecurityMiddleware",
"django.middleware.cache.FetchFromCacheMiddleware",
]
# By default don't cache anything unless a view explicitly requests caching!
CACHE_MIDDLEWARE_SECONDS = 0
CACHE_MIDDLEWARE_KEY_PREFIX = "pages"
CACHES = CACHES
ROOT_URLCONF = "kolibri.deployment.default.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"kolibri.core.context_processors.custom_context_processor.developer_mode",
]
},
}
]
WSGI_APPLICATION = "kolibri.deployment.default.wsgi.application"
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if conf.OPTIONS["Database"]["DATABASE_ENGINE"] == "sqlite":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(
conf.KOLIBRI_HOME,
conf.OPTIONS["Database"]["DATABASE_NAME"] or "db.sqlite3",
),
"OPTIONS": {"timeout": 100},
},
"notifications_db": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": os.path.join(conf.KOLIBRI_HOME, "notifications.sqlite3"),
"OPTIONS": {"timeout": 100},
},
}
DATABASE_ROUTERS = ("kolibri.core.notifications.models.NotificationsRouter",)
elif conf.OPTIONS["Database"]["DATABASE_ENGINE"] == "postgres":
DATABASES = {
"default": {
"ENGINE": "django.db.backends.postgresql",
"NAME": conf.OPTIONS["Database"]["DATABASE_NAME"],
"PASSWORD": conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
"USER": conf.OPTIONS["Database"]["DATABASE_USER"],
"HOST": conf.OPTIONS["Database"]["DATABASE_HOST"],
"PORT": conf.OPTIONS["Database"]["DATABASE_PORT"],
"TEST": {"NAME": "test"},
},
"default-serializable": {
"ENGINE": "django.db.backends.postgresql",
"NAME": conf.OPTIONS["Database"]["DATABASE_NAME"],
"PASSWORD": conf.OPTIONS["Database"]["DATABASE_PASSWORD"],
"USER": conf.OPTIONS["Database"]["DATABASE_USER"],
"HOST": conf.OPTIONS["Database"]["DATABASE_HOST"],
"PORT": conf.OPTIONS["Database"]["DATABASE_PORT"],
"OPTIONS": {"isolation_level": isolation_level},
"TEST": {"MIRROR": "default"},
},
}
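
# Illustrative note (not part of the original settings): the values read through
# conf.OPTIONS["Database"] above come from Kolibri's options/ini configuration. A
# minimal PostgreSQL section might look like the sketch below; the section and key
# names simply mirror the option keys used above and are shown for orientation only.
#
#     [Database]
#     DATABASE_ENGINE = postgres
#     DATABASE_NAME = kolibri
#     DATABASE_USER = kolibri
#     DATABASE_PASSWORD = <secret>
#     DATABASE_HOST = localhost
#     DATABASE_PORT = 5432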
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
# For language names, see:
# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# http://helpsharepointvision.nevron.com/Culture_Table.html
# django-specific format, e.g.: [ ('bn-bd', 'বাংলা'), ('en', 'English'), ...]
LANGUAGES = [
(
i18n.KOLIBRI_LANGUAGE_INFO[lang_code]["intl_code"],
i18n.KOLIBRI_LANGUAGE_INFO[lang_code]["language_name"],
)
for lang_code in conf.OPTIONS["Deployment"]["LANGUAGES"]
if lang_code in i18n.KOLIBRI_LANGUAGE_INFO
]
# Some languages are not supported out-of-the-box by Django
# Here, we use the language code in Intl.js
EXTRA_LANG_INFO = {
"ff-cm": {
"bidi": False,
"code": "ff-cm",
"name": "Fulfulde (Cameroon)",
"name_local": "Fulfulde Mbororoore",
},
"es-419": {
"bidi": False,
"code": "es-419",
"name": "Spanish (Latin America)",
"name_local": "Español",
},
"es-es": {
"bidi": False,
"code": "es-es",
"name": "Spanish (Spain)",
"name_local": "Español (España)",
},
"fr-ht": {
"bidi": False,
"code": "fr-ht",
"name": "Haitian Creole",
"name_local": "Kreyòl ayisyen",
},
"gu-in": {
"bidi": False,
"code": "gu-in",
"name": "Gujarati",
"name_local": "ગુજરાતી",
},
"km": {"bidi": False, "code": "km", "name": "Khmer", "name_local": "ភាសាខ្មែរ"},
"nyn": {
"bidi": False,
"code": "nyn",
"name": "Chichewa, Chewa, Nyanja",
"name_local": "Chinyanja",
},
"zh": {
"bidi": False,
"code": "zh-hans",
"name": "Simplified Chinese",
"name_local": "简体中文",
},
"yo": {"bidi": False, "code": "yo", "name": "Yoruba", "name_local": "Yorùbá"},
"zu": {"bidi": False, "code": "zu", "name": "Zulu", "name_local": "isiZulu"},
}
locale.LANG_INFO.update(EXTRA_LANG_INFO)
LANGUAGE_CODE = (
"en"
if "en" in conf.OPTIONS["Deployment"]["LANGUAGES"]
else conf.OPTIONS["Deployment"]["LANGUAGES"][0]
)
try:
TIME_ZONE = get_localzone().zone
except (pytz.UnknownTimeZoneError, ValueError):
# Do not fail at this point because a timezone was not
# detected.
TIME_ZONE = pytz.utc.zone
# Fixes https://github.com/regebro/tzlocal/issues/44
# tzlocal 1.4 returns 'local' if unable to detect the timezone,
# and this TZ id is invalid
if TIME_ZONE == "local":
TIME_ZONE = pytz.utc.zone
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
path_prefix = conf.OPTIONS["Deployment"]["URL_PATH_PREFIX"]
if path_prefix != "/":
path_prefix = "/" + path_prefix
STATIC_URL = urljoin(path_prefix, "static/")
STATIC_ROOT = os.path.join(conf.KOLIBRI_HOME, "static")
MEDIA_URL = urljoin(path_prefix, "media/")
MEDIA_ROOT = os.path.join(conf.KOLIBRI_HOME, "media")
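
# Worked example (illustrative comment, not part of the original settings): with
# URL_PATH_PREFIX set to "kolibri/", the block above produces path_prefix ==
# "/kolibri/", STATIC_URL == "/kolibri/static/" and MEDIA_URL == "/kolibri/media/";
# with the default "/" they remain "/static/" and "/media/".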
# https://docs.djangoproject.com/en/1.11/ref/settings/#csrf-cookie-path
# Ensure that our CSRF cookie does not collide with other CSRF cookies
# set by other Django apps served from the same domain.
CSRF_COOKIE_PATH = path_prefix
CSRF_COOKIE_NAME = "kolibri_csrftoken"
# https://docs.djangoproject.com/en/1.11/ref/settings/#session-cookie-path
# Ensure that our session cookie does not collide with other session cookies
# set by other Django apps served from the same domain.
SESSION_COOKIE_PATH = path_prefix
# https://docs.djangoproject.com/en/1.11/ref/settings/#std:setting-LOGGING
# https://docs.djangoproject.com/en/1.11/topics/logging/
LOGGING = get_logging_config(
conf.LOG_ROOT,
debug=DEBUG,
debug_database=conf.OPTIONS["Server"]["DEBUG_LOG_DATABASE"],
)
# Customizing Django auth system
# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/
AUTH_USER_MODEL = "kolibriauth.FacilityUser"
# Our own custom setting to override the anonymous user model
AUTH_ANONYMOUS_USER_MODEL = "kolibriauth.KolibriAnonymousUser"
AUTHENTICATION_BACKENDS = ["kolibri.core.auth.backends.FacilityUserBackend"]
# Django REST Framework
# http://www.django-rest-framework.org/api-guide/settings/
REST_FRAMEWORK = {
"UNAUTHENTICATED_USER": "kolibri.core.auth.models.KolibriAnonymousUser",
"DEFAULT_AUTHENTICATION_CLASSES": [
"rest_framework.authentication.SessionAuthentication"
],
"DEFAULT_CONTENT_NEGOTIATION_CLASS": "kolibri.core.negotiation.LimitContentNegotiation",
"EXCEPTION_HANDLER": "kolibri.core.utils.exception_handler.custom_exception_handler",
}
# System warnings to disable
# see https://docs.djangoproject.com/en/1.11/ref/settings/#silenced-system-checks
SILENCED_SYSTEM_CHECKS = ["auth.W004"]
# Configuration for Django JS Reverse
# https://github.com/ierror/django-js-reverse#options
JS_REVERSE_EXCLUDE_NAMESPACES = ["admin"]
ENABLE_DATA_BOOTSTRAPPING = True
# Session configuration
SESSION_ENGINE = "django.contrib.sessions.backends.file"
SESSION_FILE_PATH = os.path.join(conf.KOLIBRI_HOME, "sessions")
if not os.path.exists(SESSION_FILE_PATH):
if not os.path.exists(conf.KOLIBRI_HOME):
raise RuntimeError("The KOLIBRI_HOME dir does not exist")
os.mkdir(SESSION_FILE_PATH)
SESSION_COOKIE_NAME = "kolibri"
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SESSION_COOKIE_AGE = 1200
apply_settings(sys.modules[__name__])
| mit |
kamalx/edx-platform | common/lib/xmodule/xmodule/tests/test_word_cloud.py | 166 | 1792 | # -*- coding: utf-8 -*-
"""Test for Word cloud Xmodule functional logic."""
from webob.multidict import MultiDict
from xmodule.word_cloud_module import WordCloudDescriptor
from . import LogicTest
class WordCloudModuleTest(LogicTest):
"""Logic tests for Word Cloud Xmodule."""
descriptor_class = WordCloudDescriptor
raw_field_data = {
'all_words': {'cat': 10, 'dog': 5, 'mom': 1, 'dad': 2},
'top_words': {'cat': 10, 'dog': 5, 'dad': 2},
'submitted': False
}
def test_bad_ajax_request(self):
"Make sure that answer for incorrect request is error json"
response = self.ajax_request('bad_dispatch', {})
self.assertDictEqual(response, {
'status': 'fail',
'error': 'Unknown Command!'
})
def test_good_ajax_request(self):
"Make sure that ajax request works correctly"
post_data = MultiDict(('student_words[]', word) for word in ['cat', 'cat', 'dog', 'sun'])
response = self.ajax_request('submit', post_data)
self.assertEqual(response['status'], 'success')
self.assertEqual(response['submitted'], True)
self.assertEqual(response['total_count'], 22)
self.assertDictEqual(
response['student_words'],
{'sun': 1, 'dog': 6, 'cat': 12}
)
self.assertListEqual(
response['top_words'],
[{'text': 'dad', 'size': 2, 'percent': 9.0},
{'text': 'sun', 'size': 1, 'percent': 5.0},
{'text': 'dog', 'size': 6, 'percent': 27.0},
{'text': 'mom', 'size': 1, 'percent': 5.0},
{'text': 'cat', 'size': 12, 'percent': 54.0}]
)
self.assertEqual(
100.0,
sum(i['percent'] for i in response['top_words']))
| agpl-3.0 |
xinjiguaike/edx-platform | lms/djangoapps/verify_student/admin.py | 48 | 1958 | from ratelimitbackend import admin
from verify_student.models import (
SoftwareSecurePhotoVerification,
VerificationStatus,
SkippedReverification,
)
class SoftwareSecurePhotoVerificationAdmin(admin.ModelAdmin):
"""
Admin for the SoftwareSecurePhotoVerification table.
"""
list_display = ('id', 'user', 'status', 'receipt_id', 'submitted_at', 'updated_at')
raw_id_fields = ('user', 'reviewing_user')
search_fields = (
'receipt_id',
)
class VerificationStatusAdmin(admin.ModelAdmin):
"""
Admin for the VerificationStatus table.
"""
list_display = ('timestamp', 'user', 'status', 'checkpoint')
readonly_fields = ()
search_fields = ('checkpoint__checkpoint_location', 'user__username')
raw_id_fields = ('user',)
def get_readonly_fields(self, request, obj=None):
"""When editing an existing record, all fields should be read-only.
VerificationStatus records should be immutable; to change the user's
status, create a new record with the updated status and a more
recent timestamp.
"""
if obj:
return self.readonly_fields + ('status', 'checkpoint', 'user', 'response', 'error')
return self.readonly_fields
class SkippedReverificationAdmin(admin.ModelAdmin):
"""Admin for the SkippedReverification table. """
list_display = ('created_at', 'user', 'course_id', 'checkpoint')
raw_id_fields = ('user',)
readonly_fields = ('user', 'course_id')
search_fields = ('user__username', 'course_id', 'checkpoint__checkpoint_location')
def has_add_permission(self, request):
"""Skipped verifications can't be created in Django admin. """
return False
admin.site.register(SoftwareSecurePhotoVerification, SoftwareSecurePhotoVerificationAdmin)
admin.site.register(SkippedReverification, SkippedReverificationAdmin)
admin.site.register(VerificationStatus, VerificationStatusAdmin)
| agpl-3.0 |
YangSongzhou/django | django/conf/locale/nb/formats.py | 504 | 1766 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
# Kept ISO formats as they are in first position
DATE_INPUT_FORMATS = [
'%Y-%m-%d', '%d.%m.%Y', '%d.%m.%y', # '2006-10-25', '25.10.2006', '25.10.06'
# '%d. %b %Y', '%d %b %Y', # '25. okt 2006', '25 okt 2006'
# '%d. %b. %Y', '%d %b. %Y', # '25. okt. 2006', '25 okt. 2006'
# '%d. %B %Y', '%d %B %Y', # '25. oktober 2006', '25 oktober 2006'
]
DATETIME_INPUT_FORMATS = [
'%Y-%m-%d %H:%M:%S', # '2006-10-25 14:30:59'
'%Y-%m-%d %H:%M:%S.%f', # '2006-10-25 14:30:59.000200'
'%Y-%m-%d %H:%M', # '2006-10-25 14:30'
'%Y-%m-%d', # '2006-10-25'
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
'%d.%m.%y %H:%M:%S', # '25.10.06 14:30:59'
'%d.%m.%y %H:%M:%S.%f', # '25.10.06 14:30:59.000200'
'%d.%m.%y %H:%M', # '25.10.06 14:30'
'%d.%m.%y', # '25.10.06'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
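
# Illustrative note (not part of the original file): Django tries the *_INPUT_FORMATS
# entries above in order when parsing user input, so with DATE_INPUT_FORMATS the
# string "25.10.2006" is matched by '%d.%m.%Y':
#
#     >>> from datetime import datetime
#     >>> datetime.strptime("25.10.2006", "%d.%m.%Y").date()
#     datetime.date(2006, 10, 25)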
| bsd-3-clause |
westinedu/wrgroups | django/db/backends/__init__.py | 77 | 31617 | import decimal
try:
import thread
except ImportError:
import dummy_thread as thread
from threading import local
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.db.backends import util
from django.db.transaction import TransactionManagementError
from django.utils import datetime_safe
from django.utils.importlib import import_module
class BaseDatabaseWrapper(local):
"""
Represents a database connection.
"""
ops = None
vendor = 'unknown'
def __init__(self, settings_dict, alias=DEFAULT_DB_ALIAS):
# `settings_dict` should be a dictionary containing keys such as
# NAME, USER, etc. It's called `settings_dict` instead of `settings`
# to disambiguate it from Django settings modules.
self.connection = None
self.queries = []
self.settings_dict = settings_dict
self.alias = alias
self.use_debug_cursor = None
# Transaction related attributes
self.transaction_state = []
self.savepoint_state = 0
self._dirty = None
def __eq__(self, other):
return self.alias == other.alias
def __ne__(self, other):
return not self == other
def _commit(self):
if self.connection is not None:
return self.connection.commit()
def _rollback(self):
if self.connection is not None:
return self.connection.rollback()
def _enter_transaction_management(self, managed):
"""
A hook for backend-specific changes required when entering manual
transaction handling.
"""
pass
def _leave_transaction_management(self, managed):
"""
A hook for backend-specific changes required when leaving manual
transaction handling. Will usually be implemented only when
_enter_transaction_management() is also required.
"""
pass
def _savepoint(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_create_sql(sid))
def _savepoint_rollback(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_rollback_sql(sid))
def _savepoint_commit(self, sid):
if not self.features.uses_savepoints:
return
self.cursor().execute(self.ops.savepoint_commit_sql(sid))
def enter_transaction_management(self, managed=True):
"""
Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is
managed as a stack.
The state and dirty flag are carried over from the surrounding block or
from the settings, if there is no surrounding block (dirty is always false
when no current block is running).
"""
if self.transaction_state:
self.transaction_state.append(self.transaction_state[-1])
else:
self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
if self._dirty is None:
self._dirty = False
self._enter_transaction_management(managed)
def leave_transaction_management(self):
"""
Leaves transaction management for a running thread. A dirty flag is carried
over to the surrounding block, as a commit will commit all changes, even
those from outside. (Commits are on connection level.)
"""
self._leave_transaction_management(self.is_managed())
if self.transaction_state:
del self.transaction_state[-1]
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
if self._dirty:
self.rollback()
raise TransactionManagementError("Transaction managed block ended with "
"pending COMMIT/ROLLBACK")
self._dirty = False
def is_dirty(self):
"""
Returns True if the current transaction requires a commit for changes to
happen.
"""
return self._dirty
def set_dirty(self):
"""
        Sets a dirty flag for the current thread and code streak. This can be used
        in a managed block of code to decide whether there are open
        changes waiting for commit.
"""
if self._dirty is not None:
self._dirty = True
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def set_clean(self):
"""
        Resets a dirty flag for the current thread and code streak. This can be used
        in a managed block of code to decide whether a commit or rollback
        should happen.
"""
if self._dirty is not None:
self._dirty = False
else:
raise TransactionManagementError("This code isn't under transaction management")
self.clean_savepoints()
def clean_savepoints(self):
self.savepoint_state = 0
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
        committed.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self._commit()
self.set_clean()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
if not self.is_managed():
self._rollback()
else:
self.set_dirty()
def commit(self):
"""
Does the commit itself and resets the dirty flag.
"""
self._commit()
self.set_clean()
def rollback(self):
"""
This function does the rollback itself and resets the dirty flag.
"""
self._rollback()
self.set_clean()
def savepoint(self):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
thread_ident = thread.get_ident()
self.savepoint_state += 1
tid = str(thread_ident).replace('-', '')
sid = "s%s_x%d" % (tid, self.savepoint_state)
self._savepoint(sid)
return sid
def savepoint_rollback(self, sid):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_rollback(sid)
def savepoint_commit(self, sid):
"""
Commits the most recent savepoint (if one exists). Does nothing if
savepoints are not supported.
"""
if self.savepoint_state:
self._savepoint_commit(sid)
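
    # Illustrative sketch (not part of the original class): savepoints wrap a risky
    # portion of an already-open transaction, e.g.:
    #
    #     sid = connection.savepoint()
    #     try:
    #         ...                                   # statements that may fail
    #         connection.savepoint_commit(sid)
    #     except Exception:
    #         connection.savepoint_rollback(sid)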
def close(self):
if self.connection is not None:
self.connection.close()
self.connection = None
def cursor(self):
if (self.use_debug_cursor or
(self.use_debug_cursor is None and settings.DEBUG)):
cursor = self.make_debug_cursor(self._cursor())
else:
cursor = util.CursorWrapper(self._cursor(), self)
return cursor
def make_debug_cursor(self, cursor):
return util.CursorDebugWrapper(cursor, self)
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
# True if django.db.backend.utils.typecast_timestamp is used on values
# returned from dates() calls.
needs_datetime_string_cast = True
empty_fetchmany_value = []
update_can_self_select = True
# Does the backend distinguish between '' and None?
interprets_empty_strings_as_nulls = False
# Does the backend allow inserting duplicate rows when a unique_together
# constraint exists, but one of the unique_together columns is NULL?
ignores_nulls_in_unique_constraints = True
can_use_chunked_reads = True
can_return_id_from_insert = False
uses_autocommit = False
uses_savepoints = False
# If True, don't use integer foreign keys referring to, e.g., positive
# integer primary keys.
related_fields_match_type = False
allow_sliced_subqueries = True
supports_joins = True
distinguishes_insert_from_update = True
supports_deleting_related_objects = True
supports_select_related = True
# Does the default test database allow multiple connections?
# Usually an indication that the test database is in-memory
test_db_allows_multiple_connections = True
# Can an object be saved without an explicit primary key?
supports_unspecified_pk = False
# Can a fixture contain forward references? i.e., are
# FK constraints checked at the end of transaction, or
# at the end of each save operation?
supports_forward_references = True
# Does a dirty transaction need to be rolled back
# before the cursor can be used again?
requires_rollback_on_dirty_transaction = False
# Does the backend allow very long model names without error?
supports_long_model_names = True
# Is there a REAL datatype in addition to floats/doubles?
has_real_datatype = False
supports_subqueries_in_group_by = True
supports_bitwise_or = True
# Do time/datetime fields have microsecond precision?
supports_microsecond_precision = True
# Does the __regex lookup support backreferencing and grouping?
supports_regex_backreferencing = True
# Can date/datetime lookups be performed using a string?
supports_date_lookup_using_string = True
# Can datetimes with timezones be used?
supports_timezones = True
# When performing a GROUP BY, is an ORDER BY NULL required
# to remove any ordering?
requires_explicit_null_ordering_when_grouping = False
# Is there a 1000 item limit on query parameters?
supports_1000_query_parameters = True
# Can an object have a primary key of 0? MySQL says No.
allows_primary_key_0 = True
# Do we need to NULL a ForeignKey out, or can the constraint check be
# deferred
can_defer_constraint_checks = False
# date_interval_sql can properly handle mixed Date/DateTime fields and timedeltas
supports_mixed_date_datetime_comparisons = True
# Features that need to be confirmed at runtime
# Cache whether the confirmation has been performed.
_confirmed = False
supports_transactions = None
supports_stddev = None
can_introspect_foreign_keys = None
def __init__(self, connection):
self.connection = connection
def confirm(self):
"Perform manual checks of any database features that might vary between installs"
self._confirmed = True
self.supports_transactions = self._supports_transactions()
self.supports_stddev = self._supports_stddev()
self.can_introspect_foreign_keys = self._can_introspect_foreign_keys()
def _supports_transactions(self):
"Confirm support for transactions"
cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection._commit()
cursor.execute('INSERT INTO ROLLBACK_TEST (X) VALUES (8)')
self.connection._rollback()
cursor.execute('SELECT COUNT(X) FROM ROLLBACK_TEST')
count, = cursor.fetchone()
cursor.execute('DROP TABLE ROLLBACK_TEST')
self.connection._commit()
return count == 0
def _supports_stddev(self):
"Confirm support for STDDEV and related stats functions"
class StdDevPop(object):
sql_function = 'STDDEV_POP'
try:
self.connection.ops.check_aggregate_support(StdDevPop())
except NotImplementedError:
self.supports_stddev = False
def _can_introspect_foreign_keys(self):
"Confirm support for introspected foreign keys"
# Every database can do this reliably, except MySQL,
# which can't do it for MyISAM tables
return True
class BaseDatabaseOperations(object):
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
"""
compiler_module = "django.db.models.sql.compiler"
def __init__(self):
self._cache = None
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
"""
return None
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError()
def date_interval_sql(self, sql, connector, timedelta):
"""
Implements the date interval functionality for expressions
"""
raise NotImplementedError()
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
truncates the given date field field_name to a DATE object with only
the given specificity.
"""
raise NotImplementedError()
def datetime_cast_sql(self):
"""
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name.
"""
return "%s"
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
"""
return ''
def drop_foreignkey_sql(self):
"""
Returns the SQL command that drops a foreign key.
"""
return "DROP CONSTRAINT"
def drop_sequence_sql(self, table):
"""
Returns any SQL necessary to drop the sequence for the given table.
Returns None if no SQL is necessary.
"""
return None
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), returns the SQL necessary
to cast it before using it in a WHERE statement. Note that the
resulting string should contain a '%s' placeholder for the column being
searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
"""
return []
def fulltext_search_sql(self, field_name):
"""
Returns the SQL WHERE clause to use in order to perform a full-text
search of the given field_name. Note that the resulting string should
contain a '%s' placeholder for the value being searched against.
"""
raise NotImplementedError('Full-text search is not implemented for this database backend')
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
"""
from django.utils.encoding import smart_unicode, force_unicode
# Convert params to contain Unicode values.
to_unicode = lambda s: force_unicode(s, strings_only=True, errors='replace')
if isinstance(params, (list, tuple)):
u_params = tuple([to_unicode(val) for val in params])
else:
u_params = dict([(to_unicode(k), to_unicode(v)) for k, v in params.items()])
return smart_unicode(sql) % u_params
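
    # Illustrative example (not part of the original class): with the default
    # implementation above,
    #     last_executed_query(cursor, 'SELECT * FROM app_book WHERE id = %s', (10,))
    # returns u'SELECT * FROM app_book WHERE id = 10' (the placeholders are filled
    # in with string interpolation purely for logging/debugging purposes).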
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
This method also receives the table name and the name of the primary-key
column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
        Returns the value to use for the LIMIT when we want "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
raise NotImplementedError
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
if self._cache is None:
self._cache = import_module(self.compiler_module)
return getattr(self._cache, compiler_name)
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
not quote the given name if it's already been quoted.
"""
raise NotImplementedError()
def random_function_sql(self):
"""
Returns a SQL expression that returns a random value.
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
raise NotImplementedError
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
"""
raise NotImplementedError
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
"""
raise NotImplementedError
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
raise NotImplementedError()
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
return "BEGIN;"
def end_transaction_sql(self, success=True):
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be appended to tables or rows to define
a tablespace. Returns '' if the backend doesn't use tablespaces.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
from django.utils.encoding import smart_unicode
        return smart_unicode(x).replace("\\", "\\\\").replace("%", "\\%").replace("_", "\\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
# need not necessarily be implemented using "LIKE" in the backend.
prep_for_iexact_query = prep_for_like_query
def value_to_db_auto(self, value):
"""
Transform a value to an object compatible with the auto field required
by the backend driver for auto columns.
"""
if value is None:
return None
return int(value)
def value_to_db_date(self, value):
"""
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
return None
return datetime_safe.new_date(value).strftime('%Y-%m-%d')
def value_to_db_datetime(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_time(self, value):
"""
Transform a datetime value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
return None
return unicode(value)
def value_to_db_decimal(self, value, max_digits, decimal_places):
"""
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
if value is None:
return None
return util.format_number(value, max_digits, decimal_places)
def year_lookup_bounds(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a field value using a year lookup.
        `value` is an int, containing the looked-up year.
"""
first = '%s-01-01 00:00:00'
second = '%s-12-31 23:59:59.999999'
return [first % value, second % value]
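
    # Illustrative example (not part of the original class): year_lookup_bounds(2006)
    # returns ['2006-01-01 00:00:00', '2006-12-31 23:59:59.999999'], i.e. the BETWEEN
    # bounds used for a `field__year=2006` lookup.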
def year_lookup_bounds_for_date_field(self, value):
"""
        Returns a two-element list with the lower and upper bound to be used
        with a BETWEEN operator to query a DateField value using a year lookup.
        `value` is an int, containing the looked-up year.
By default, it just calls `self.year_lookup_bounds`. Some backends need
this hook because on their DB date fields can't be compared to values
which include a time part.
"""
return self.year_lookup_bounds(value)
def convert_values(self, value, field):
"""Coerce the value returned by the database backend into a consistent type that
is compatible with the field type.
"""
internal_type = field.get_internal_type()
if internal_type == 'DecimalField':
return value
elif internal_type and internal_type.endswith('IntegerField') or internal_type == 'AutoField':
return int(value)
elif internal_type in ('DateField', 'DateTimeField', 'TimeField'):
return value
# No field, or the field isn't known to be a decimal or integer
# Default to a float
return float(value)
def check_aggregate_support(self, aggregate_func):
"""Check that the backend supports the provided aggregate
This is used on specific backends to rule out known aggregates
that are known to have faulty implementations. If the named
aggregate function has a known problem, the backend should
        raise NotImplementedError.
"""
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
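
    # Illustrative example (not part of the original class):
    #     combine_expression('+', ['"price"', '"tax"'])
    # returns '"price" + "tax"'; backends override this when an operator differs
    # between databases (e.g. the Oracle cases mentioned in the docstring above).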
class BaseDatabaseIntrospection(object):
"""
This class encapsulates all backend-specific introspection utilities
"""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
return name
def table_names(self):
"Returns a list of names of all tables that exist in the database."
cursor = self.connection.cursor()
return self.get_table_list(cursor)
def django_table_names(self, only_existing=False):
"""
Returns a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
"""
from django.db import models, router
tables = set()
for app in models.get_apps():
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
existing_tables = self.table_names()
tables = [
t
for t in tables
if self.table_name_converter(t) in existing_tables
]
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
from django.db import models, router
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
all_models.append(model)
tables = map(self.table_name_converter, tables)
return set([
m for m in all_models
if self.table_name_converter(m._meta.db_table) in tables
])
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
from django.db import models, router
apps = models.get_apps()
sequence_list = []
for app in apps:
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.local_many_to_many:
# If this is an m2m using an intermediate table,
# we don't need to reset the sequence.
if f.rel.through is None:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
class BaseDatabaseClient(object):
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
def __init__(self, connection):
# connection is an instance of BaseDatabaseWrapper.
self.connection = connection
def runshell(self):
raise NotImplementedError()
class BaseDatabaseValidation(object):
"""
    This class encapsulates all backend-specific model validation.
"""
def __init__(self, connection):
self.connection = connection
def validate_field(self, errors, opts, f):
"By default, there is no backend-specific validation"
pass
| bsd-3-clause |
PolicyStat/django | django/utils/translation/__init__.py | 118 | 7000 | """
Internationalization support.
"""
from __future__ import unicode_literals
import re
from django.utils.decorators import ContextDecorator
from django.utils.encoding import force_text
from django.utils.functional import lazy
from django.utils import six
__all__ = [
'activate', 'deactivate', 'override', 'deactivate_all',
'get_language', 'get_language_from_request',
'get_language_info', 'get_language_bidi',
'check_for_language', 'to_locale', 'templatize', 'string_concat',
'gettext', 'gettext_lazy', 'gettext_noop',
'ugettext', 'ugettext_lazy', 'ugettext_noop',
'ngettext', 'ngettext_lazy',
'ungettext', 'ungettext_lazy',
'pgettext', 'pgettext_lazy',
'npgettext', 'npgettext_lazy',
'LANGUAGE_SESSION_KEY',
]
LANGUAGE_SESSION_KEY = '_language'
class TranslatorCommentWarning(SyntaxWarning):
pass
# Here be dragons, so a short explanation of the logic won't hurt:
# We are trying to solve two problems: (1) access settings, in particular
# settings.USE_I18N, as late as possible, so that modules can be imported
# without having to first configure Django, and (2) if some other code creates
# a reference to one of these functions, don't break that reference when we
# replace the functions with their real counterparts (once we do access the
# settings).
class Trans(object):
"""
The purpose of this class is to store the actual translation function upon
receiving the first call to that function. After this is done, changes to
USE_I18N will have no effect to which function is served upon request. If
your tests rely on changing USE_I18N, you can delete all the functions
from _trans.__dict__.
Note that storing the function with setattr will have a noticeable
performance effect, as access to the function goes the normal path,
instead of using __getattr__.
"""
def __getattr__(self, real_name):
from django.conf import settings
if settings.USE_I18N:
from django.utils.translation import trans_real as trans
else:
from django.utils.translation import trans_null as trans
setattr(self, real_name, getattr(trans, real_name))
return getattr(trans, real_name)
_trans = Trans()
# The Trans class is no more needed, so remove it from the namespace.
del Trans
def gettext_noop(message):
return _trans.gettext_noop(message)
ugettext_noop = gettext_noop
def gettext(message):
return _trans.gettext(message)
def ngettext(singular, plural, number):
return _trans.ngettext(singular, plural, number)
def ugettext(message):
return _trans.ugettext(message)
def ungettext(singular, plural, number):
return _trans.ungettext(singular, plural, number)
def pgettext(context, message):
return _trans.pgettext(context, message)
def npgettext(context, singular, plural, number):
return _trans.npgettext(context, singular, plural, number)
gettext_lazy = lazy(gettext, str)
ugettext_lazy = lazy(ugettext, six.text_type)
pgettext_lazy = lazy(pgettext, six.text_type)
def lazy_number(func, resultclass, number=None, **kwargs):
if isinstance(number, six.integer_types):
kwargs['number'] = number
proxy = lazy(func, resultclass)(**kwargs)
else:
class NumberAwareString(resultclass):
def __mod__(self, rhs):
if isinstance(rhs, dict) and number:
try:
number_value = rhs[number]
except KeyError:
raise KeyError('Your dictionary lacks key \'%s\'. '
'Please provide it, because it is required to '
'determine whether string is singular or plural.'
% number)
else:
number_value = rhs
kwargs['number'] = number_value
translated = func(**kwargs)
try:
translated = translated % rhs
except TypeError:
# String doesn't contain a placeholder for the number
pass
return translated
proxy = lazy(lambda **kwargs: NumberAwareString(), NumberAwareString)(**kwargs)
return proxy
def ngettext_lazy(singular, plural, number=None):
return lazy_number(ngettext, str, singular=singular, plural=plural, number=number)
def ungettext_lazy(singular, plural, number=None):
return lazy_number(ungettext, six.text_type, singular=singular, plural=plural, number=number)
def npgettext_lazy(context, singular, plural, number=None):
return lazy_number(npgettext, six.text_type, context=context, singular=singular, plural=plural, number=number)
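
# Illustrative example (not part of the original module): the number-aware lazy
# proxies above allow the plural-selecting value to be picked out of a dict at
# interpolation time, e.g. (sketch):
#
#     msg = ngettext_lazy('There is %(count)d object', 'There are %(count)d objects', 'count')
#     msg % {'count': 2}    # -> 'There are 2 objects' once a translation is active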
def activate(language):
return _trans.activate(language)
def deactivate():
return _trans.deactivate()
class override(ContextDecorator):
def __init__(self, language, deactivate=False):
self.language = language
self.deactivate = deactivate
def __enter__(self):
self.old_language = get_language()
if self.language is not None:
activate(self.language)
else:
deactivate_all()
def __exit__(self, exc_type, exc_value, traceback):
if self.deactivate:
deactivate()
else:
activate(self.old_language)
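
# Illustrative example (not part of the original module): `override` works as a
# context manager (or decorator) to switch the active language temporarily:
#
#     with override('de'):
#         greeting = ugettext('Hello')   # resolved against the German catalog
#     # the previously active language is restored on exit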
def get_language():
return _trans.get_language()
def get_language_bidi():
return _trans.get_language_bidi()
def check_for_language(lang_code):
return _trans.check_for_language(lang_code)
def to_locale(language):
return _trans.to_locale(language)
def get_language_from_request(request, check_path=False):
return _trans.get_language_from_request(request, check_path)
def get_language_from_path(path):
return _trans.get_language_from_path(path)
def templatize(src, origin=None):
return _trans.templatize(src, origin)
def deactivate_all():
return _trans.deactivate_all()
def _string_concat(*strings):
"""
Lazy variant of string concatenation, needed for translations that are
constructed from multiple parts.
"""
return ''.join(force_text(s) for s in strings)
string_concat = lazy(_string_concat, six.text_type)
def get_language_info(lang_code):
from django.conf.locale import LANG_INFO
try:
lang_info = LANG_INFO[lang_code]
if 'fallback' in lang_info and 'name' not in lang_info:
return get_language_info(lang_info['fallback'][0])
return lang_info
except KeyError:
if '-' not in lang_code:
raise KeyError("Unknown language code %s." % lang_code)
generic_lang_code = lang_code.split('-')[0]
try:
return LANG_INFO[generic_lang_code]
except KeyError:
raise KeyError("Unknown language code %s and %s." % (lang_code, generic_lang_code))
trim_whitespace_re = re.compile(r'\s*\n\s*')
def trim_whitespace(s):
return trim_whitespace_re.sub(' ', s.strip())
| bsd-3-clause |
MLAB-project/MLAB-I2c-modules | src/pymlab/sensors/lts.py | 2 | 1908 | #!/usr/bin/python
# Python library for LTS01A MLAB module with MAX31725 i2c Local Temperature Sensor
#
# print "LTS01A status", bin(get_config(I2C_bus_number, LTS01A_address))
# Returns the status register value
# print "LTS01A temp", get_temp(I2C_bus_number, LTS01A_address)
# Returns the temperature.
import struct
import logging
from pymlab.sensors import Device
LOGGER = logging.getLogger(__name__)
class LTS01(Device):
"""
Example:
.. code-block:: python
# Python library for LTS01A MLAB module with MAX31725 i2c Local Temperature Sensor
"""
FAULTS = {
1: [0b00],
2: [0b01],
4: [0b10],
6: [0b11],
}
def __init__(self, parent = None, address = 0x48, fault_queue = 1, **kwargs):
Device.__init__(self, parent, address, **kwargs)
## register definitions
self.Reg_temp = 0x00
self.Reg_conf = 0x01
self.Reg_Thys = 0x02
self.Reg_Tos = 0x03
## config parameters
self.SHUTDOWN = (1 << 0)
self.INTERRUPT_Mode = (1 << 1)
self.COMPARATOR_Mode = (0 << 1)
self.OS_POLARITY_1 = (1 << 2)
self.OS_POLARITY_0 = (0 << 2)
# self.FQ_num = (int(self.FAULTS[fault_queue]) << 3)
self.FORMAT_2complement = (0 << 5)
self.FORMAT_extended = (1 << 5)
self.Timeout_on = (0 << 6)
self.Timeout_off = (1 << 6)
def initialize(self):
setup = 0x00
self.bus.write_byte_data(self.address, self.Reg_conf, setup)
LOGGER.debug("LTS sensor initialized. ",)
return self.bus.read_byte_data(self.address,0x01);
def get_temp(self):
# self.bus.write_byte(self.address,0x00)
temp = self.bus.read_int16_data(self.address, self.Reg_temp) / 256.0
        # Temperature = register_value * 0.00390625 degC (the sensor is big-endian but SMBus is little-endian by default)
return temp
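
    # Illustrative worked example (not part of the original driver): the MAX31725
    # reports temperature as a 16-bit value scaled by 1/256 degC per LSB, so a raw
    # register reading of 0x1900 (6400) converts to 6400 / 256.0 = 25.0 degC above.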
| gpl-3.0 |
amirrpp/django-oscar | src/oscar/apps/offer/migrations/0001_initial.py | 52 | 15207 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields.autoslugfield
from decimal import Decimal
import oscar.models.fields
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('catalogue', '0001_initial'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Benefit',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Percentage', "Discount is a percentage off of the product's value"), ('Absolute', "Discount is a fixed amount off of the product's value"), ('Multibuy', 'Discount is to give the cheapest product for free'), ('Fixed price', 'Get the products that meet the condition for a fixed price'), ('Shipping absolute', 'Discount is a fixed amount of the shipping cost'), ('Shipping fixed price', 'Get shipping for a fixed price'), ('Shipping percentage', 'Discount is a percentage off of the shipping cost')])),
('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)),
('max_affected_items', models.PositiveIntegerField(verbose_name='Max Affected Items', blank=True, help_text='Set this to prevent the discount consuming all items within the range that are in the basket.', null=True)),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
],
options={
'verbose_name_plural': 'Benefits',
'verbose_name': 'Benefit',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Condition',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('type', models.CharField(verbose_name='Type', max_length=128, blank=True, choices=[('Count', 'Depends on number of items in basket that are in condition range'), ('Value', 'Depends on value of items in basket that are in condition range'), ('Coverage', 'Needs to contain a set number of DISTINCT items from the condition range')])),
('value', oscar.models.fields.PositiveDecimalField(max_digits=12, decimal_places=2, blank=True, verbose_name='Value', null=True)),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
],
options={
'verbose_name_plural': 'Conditions',
'verbose_name': 'Condition',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='ConditionalOffer',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(verbose_name='Name', unique=True, max_length=128, help_text="This is displayed within the customer's basket")),
('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)),
('description', models.TextField(verbose_name='Description', help_text='This is displayed on the offer browsing page', blank=True)),
('offer_type', models.CharField(default='Site', max_length=128, verbose_name='Type', choices=[('Site', 'Site offer - available to all users'), ('Voucher', 'Voucher offer - only available after entering the appropriate voucher code'), ('User', 'User offer - available to certain types of user'), ('Session', 'Session offer - temporary offer, available for a user for the duration of their session')])),
('status', models.CharField(default='Open', max_length=64, verbose_name='Status')),
('priority', models.IntegerField(default=0, verbose_name='Priority', help_text='The highest priority offers are applied first')),
('start_datetime', models.DateTimeField(blank=True, verbose_name='Start date', null=True)),
('end_datetime', models.DateTimeField(verbose_name='End date', blank=True, help_text="Offers are active until the end of the 'end date'", null=True)),
('max_global_applications', models.PositiveIntegerField(verbose_name='Max global applications', blank=True, help_text='The number of times this offer can be used before it is unavailable', null=True)),
('max_user_applications', models.PositiveIntegerField(verbose_name='Max user applications', blank=True, help_text='The number of times a single user can use this offer', null=True)),
('max_basket_applications', models.PositiveIntegerField(verbose_name='Max basket applications', blank=True, help_text='The number of times this offer can be applied to a basket (and order)', null=True)),
('max_discount', models.DecimalField(verbose_name='Max discount', max_digits=12, decimal_places=2, null=True, help_text='When an offer has given more discount to orders than this threshold, then the offer becomes unavailable', blank=True)),
('total_discount', models.DecimalField(default=Decimal('0.00'), max_digits=12, decimal_places=2, verbose_name='Total Discount')),
('num_applications', models.PositiveIntegerField(default=0, verbose_name='Number of applications')),
('num_orders', models.PositiveIntegerField(default=0, verbose_name='Number of Orders')),
('redirect_url', oscar.models.fields.ExtendedURLField(verbose_name='URL redirect (optional)', blank=True)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('benefit', models.ForeignKey(verbose_name='Benefit', to='offer.Benefit')),
('condition', models.ForeignKey(verbose_name='Condition', to='offer.Condition')),
],
options={
'ordering': ['-priority'],
'verbose_name_plural': 'Conditional offers',
'verbose_name': 'Conditional offer',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='Range',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(unique=True, max_length=128, verbose_name='Name')),
('slug', oscar.models.fields.autoslugfield.AutoSlugField(populate_from='name', unique=True, verbose_name='Slug', max_length=128, editable=False, blank=True)),
('description', models.TextField(blank=True)),
('is_public', models.BooleanField(default=False, verbose_name='Is public?', help_text='Public ranges have a customer-facing page')),
('includes_all_products', models.BooleanField(default=False, verbose_name='Includes all products?')),
('proxy_class', oscar.models.fields.NullCharField(unique=True, verbose_name='Custom class', default=None, max_length=255)),
('date_created', models.DateTimeField(auto_now_add=True, verbose_name='Date Created')),
('classes', models.ManyToManyField(related_name='classes', verbose_name='Product Types', to='catalogue.ProductClass', blank=True)),
('excluded_products', models.ManyToManyField(related_name='excludes', verbose_name='Excluded Products', to='catalogue.Product', blank=True)),
('included_categories', models.ManyToManyField(related_name='includes', verbose_name='Included Categories', to='catalogue.Category', blank=True)),
],
options={
'verbose_name_plural': 'Ranges',
'verbose_name': 'Range',
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RangeProduct',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('display_order', models.IntegerField(default=0)),
('product', models.ForeignKey(to='catalogue.Product')),
('range', models.ForeignKey(to='offer.Range')),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='RangeProductFileUpload',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('filepath', models.CharField(max_length=255, verbose_name='File Path')),
('size', models.PositiveIntegerField(verbose_name='Size')),
('date_uploaded', models.DateTimeField(auto_now_add=True, verbose_name='Date Uploaded')),
('status', models.CharField(default='Pending', max_length=32, verbose_name='Status', choices=[('Pending', 'Pending'), ('Failed', 'Failed'), ('Processed', 'Processed')])),
('error_message', models.CharField(max_length=255, verbose_name='Error Message', blank=True)),
('date_processed', models.DateTimeField(verbose_name='Date Processed', null=True)),
('num_new_skus', models.PositiveIntegerField(verbose_name='Number of New SKUs', null=True)),
('num_unknown_skus', models.PositiveIntegerField(verbose_name='Number of Unknown SKUs', null=True)),
('num_duplicate_skus', models.PositiveIntegerField(verbose_name='Number of Duplicate SKUs', null=True)),
('range', models.ForeignKey(verbose_name='Range', related_name='file_uploads', to='offer.Range')),
('uploaded_by', models.ForeignKey(verbose_name='Uploaded By', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-date_uploaded',),
'verbose_name_plural': 'Range Product Uploaded Files',
'verbose_name': 'Range Product Uploaded File',
},
bases=(models.Model,),
),
migrations.AlterUniqueTogether(
name='rangeproduct',
unique_together=set([('range', 'product')]),
),
migrations.AddField(
model_name='range',
name='included_products',
field=models.ManyToManyField(related_name='includes', verbose_name='Included Products', to='catalogue.Product', through='offer.RangeProduct', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='condition',
name='range',
field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True),
preserve_default=True,
),
migrations.AddField(
model_name='benefit',
name='range',
field=models.ForeignKey(null=True, verbose_name='Range', to='offer.Range', blank=True),
preserve_default=True,
),
migrations.CreateModel(
name='AbsoluteDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Absolute discount benefits',
'verbose_name': 'Absolute discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='CountCondition',
fields=[
],
options={
'verbose_name_plural': 'Count conditions',
'verbose_name': 'Count condition',
'proxy': True,
},
bases=('offer.condition',),
),
migrations.CreateModel(
name='CoverageCondition',
fields=[
],
options={
'verbose_name_plural': 'Coverage Conditions',
'verbose_name': 'Coverage Condition',
'proxy': True,
},
bases=('offer.condition',),
),
migrations.CreateModel(
name='FixedPriceBenefit',
fields=[
],
options={
'verbose_name_plural': 'Fixed price benefits',
'verbose_name': 'Fixed price benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='MultibuyDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Multibuy discount benefits',
'verbose_name': 'Multibuy discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='PercentageDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Percentage discount benefits',
'verbose_name': 'Percentage discount benefit',
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='ShippingBenefit',
fields=[
],
options={
'proxy': True,
},
bases=('offer.benefit',),
),
migrations.CreateModel(
name='ShippingAbsoluteDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Shipping absolute discount benefits',
'verbose_name': 'Shipping absolute discount benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ShippingFixedPriceBenefit',
fields=[
],
options={
'verbose_name_plural': 'Fixed price shipping benefits',
'verbose_name': 'Fixed price shipping benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ShippingPercentageDiscountBenefit',
fields=[
],
options={
'verbose_name_plural': 'Shipping percentage discount benefits',
'verbose_name': 'Shipping percentage discount benefit',
'proxy': True,
},
bases=('offer.shippingbenefit',),
),
migrations.CreateModel(
name='ValueCondition',
fields=[
],
options={
'verbose_name_plural': 'Value conditions',
'verbose_name': 'Value condition',
'proxy': True,
},
bases=('offer.condition',),
),
]
| bsd-3-clause |
habibiefaried/ryu | ryu/tests/unit/ofproto/test_oxm.py | 29 | 5609 | # Copyright (C) 2015 Nippon Telegraph and Telephone Corporation.
# Copyright (C) 2015 YAMAMOTO Takashi <yamamoto at valinux co jp>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import ryu.ofproto.ofproto_v1_3 as ofp
class Test_OXM(unittest.TestCase):
def _test_encode(self, user, on_wire):
(f, uv) = user
(n, v, m) = ofp.oxm_from_user(f, uv)
buf = bytearray()
ofp.oxm_serialize(n, v, m, buf, 0)
self.assertEqual(on_wire, buf)
def _test_decode(self, user, on_wire):
(n, v, m, l) = ofp.oxm_parse(on_wire, 0)
self.assertEqual(len(on_wire), l)
(f, uv) = ofp.oxm_to_user(n, v, m)
self.assertEqual(user, (f, uv))
def _test_encode_header(self, user, on_wire):
f = user
n = ofp.oxm_from_user_header(f)
buf = bytearray()
ofp.oxm_serialize_header(n, buf, 0)
self.assertEqual(on_wire, buf)
def _test_decode_header(self, user, on_wire):
(n, l) = ofp.oxm_parse_header(on_wire, 0)
self.assertEqual(len(on_wire), l)
f = ofp.oxm_to_user_header(n)
self.assertEqual(user, f)
def _test(self, user, on_wire, header_bytes):
self._test_encode(user, on_wire)
self._test_decode(user, on_wire)
if isinstance(user[1], tuple): # has mask?
return
user_header = user[0]
on_wire_header = on_wire[:header_bytes]
self._test_decode_header(user_header, on_wire_header)
if user_header.startswith('field_'):
return # not supported
self._test_encode_header(user_header, on_wire_header)
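    # Illustrative note (added; not part of the original tests): an OXM TLV on
    # the wire is class(2 bytes) | field<<1|hasmask(1 byte) | length(1 byte) |
    # value [| mask]. E.g. b'\x80\x00\x16\x04' below is class 0x8000
    # (OpenFlow basic), field 11 (ipv4_src) with hasmask=0, length 4.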
def test_basic_nomask(self):
user = ('ipv4_src', '192.0.2.1')
on_wire = (
b'\x80\x00\x16\x04'
b'\xc0\x00\x02\x01'
)
self._test(user, on_wire, 4)
def test_basic_mask(self):
user = ('ipv4_src', ('192.0.2.1', '255.255.0.0'))
on_wire = (
b'\x80\x00\x17\x08'
b'\xc0\x00\x02\x01'
b'\xff\xff\x00\x00'
)
self._test(user, on_wire, 4)
def test_exp_nomask(self):
user = ('_dp_hash', 0x12345678)
on_wire = (
b'\xff\xff\x00\x08'
b'\x00\x00\x23\x20' # Nicira
b'\x12\x34\x56\x78'
)
self._test(user, on_wire, 8)
def test_exp_mask(self):
user = ('_dp_hash', (0x12345678, 0x7fffffff))
on_wire = (
b'\xff\xff\x01\x0c'
b'\x00\x00\x23\x20' # Nicira
b'\x12\x34\x56\x78'
b'\x7f\xff\xff\xff'
)
self._test(user, on_wire, 8)
def test_exp_nomask_2(self):
user = ('tcp_flags', 0x876)
on_wire = (
b'\xff\xff\x54\x06'
b'\x4f\x4e\x46\x00' # ONF
b'\x08\x76'
)
self._test(user, on_wire, 8)
def test_exp_mask_2(self):
user = ('tcp_flags', (0x876, 0x7ff))
on_wire = (
b'\xff\xff\x55\x08'
b'\x4f\x4e\x46\x00' # ONF
b'\x08\x76'
b'\x07\xff'
)
self._test(user, on_wire, 8)
def test_exp_nomask_3(self):
user = ('actset_output', 0x98765432)
on_wire = (
b'\xff\xff\x56\x08'
b'\x4f\x4e\x46\x00' # ONF
b'\x98\x76\x54\x32'
)
self._test(user, on_wire, 8)
def test_exp_mask_3(self):
user = ('actset_output', (0x98765432, 0xfffffffe))
on_wire = (
b'\xff\xff\x57\x0c'
b'\x4f\x4e\x46\x00' # ONF
b'\x98\x76\x54\x32'
b'\xff\xff\xff\xfe'
)
self._test(user, on_wire, 8)
def test_nxm_1_nomask(self):
user = ('tun_ipv4_src', '192.0.2.1')
on_wire = (
b'\x00\x01\x3e\x04'
b'\xc0\x00\x02\x01'
)
self._test(user, on_wire, 4)
def test_nxm_1_mask(self):
user = ('tun_ipv4_src', ('192.0.2.1', '255.255.0.0'))
on_wire = (
b'\x00\x01\x3f\x08'
b'\xc0\x00\x02\x01'
b'\xff\xff\x00\x00'
)
self._test(user, on_wire, 4)
def test_ext_256_nomask(self):
user = ('pbb_uca', 50)
on_wire = (
b'\xff\xff\x00\x07'
b'\x4f\x4e\x46\x00' # ONF
b'\x0a\x00'
b'\x32'
)
self._test(user, on_wire, 10)
def test_ext_256_mask(self):
user = ('pbb_uca', (50, 51))
on_wire = (
b'\xff\xff\x01\x08'
b'\x4f\x4e\x46\x00' # ONF
b'\x0a\x00'
b'\x32'
b'\x33'
)
self._test(user, on_wire, 10)
def test_basic_unknown_nomask(self):
user = ('field_100', 'aG9nZWhvZ2U=')
on_wire = (
b'\x00\x00\xc8\x08'
b'hogehoge'
)
self._test(user, on_wire, 4)
def test_basic_unknown_mask(self):
user = ('field_100', ('aG9nZWhvZ2U=', 'ZnVnYWZ1Z2E='))
on_wire = (
b'\x00\x00\xc9\x10'
b'hogehoge'
b'fugafuga'
)
self._test(user, on_wire, 4)
| apache-2.0 |
Mte90/remo | remo/reports/views.py | 1 | 10633 | from django.conf import settings
from django.contrib import messages
from django.contrib.auth.models import User
from django.core.paginator import EmptyPage, InvalidPage, Paginator
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.timezone import now
from django.views.decorators.cache import never_cache
from django_statsd.clients import statsd
import forms
from remo.base.decorators import permission_check
from remo.base.templatetags.helpers import urlparams
from remo.base.utils import month2number
from remo.profiles.models import FunctionalArea, UserProfile
from remo.reports import ACTIVITY_CAMPAIGN, UNLISTED_ACTIVITIES
from remo.reports.models import NGReport, NGReportComment
# New reporting system
LIST_NG_REPORTS_DEFAULT_SORT = 'created_date_desc'
LIST_NG_REPORTS_VALID_SORTS = {
'reporter_desc': '-user__last_name,user__first_name',
'reporter_asc': 'user__last_name,user__first_name',
'mentor_desc': '-mentor__last_name,mentor__first_name',
'mentor_asc': 'mentor__last_name,mentor__first_name',
'activity_desc': '-activity__name',
'activity_asc': 'activity__name',
'report_date_desc': '-report_date',
'report_date_asc': 'report_date',
'created_date_desc': '-created_on',
'created_date_asc': 'created_on'}
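# Illustrative note (added): each sort key expands into an order_by() call in
# list_ng_reports() below, e.g. 'reporter_asc' becomes
# report_list.order_by('user__last_name', 'user__first_name').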
@never_cache
@permission_check(permissions=['reports.add_ngreport', 'reports.change_ngreport'],
filter_field='display_name', owner_field='user', model=UserProfile)
def edit_ng_report(request, display_name='', year=None, month=None, day=None, id=None):
user = request.user
created = False
initial = {}
if not id:
report = NGReport()
created = True
initial = {'location': '%s, %s, %s' % (user.userprofile.city,
user.userprofile.region,
user.userprofile.country),
'latitude': user.userprofile.lat,
'longitude': user.userprofile.lon}
else:
report = get_object_or_404(NGReport, pk=id, user__userprofile__display_name=display_name)
if not created and report.activity.name in UNLISTED_ACTIVITIES:
messages.warning(request, 'You cannot edit this report.')
return redirect(report.get_absolute_url())
report_form = forms.NGReportForm(request.POST or None, instance=report, initial=initial)
if report_form.is_valid():
if created:
report.user = user
messages.success(request, 'Report successfully created.')
statsd.incr('reports.create_report')
else:
messages.success(request, 'Report successfully updated.')
statsd.incr('reports.edit_report')
report_form.save()
return redirect(report.get_absolute_url())
return render(request, 'edit_ng_report.jinja',
{'report_form': report_form,
'pageuser': user,
'report': report,
'created': created,
'campaign_trigger': ACTIVITY_CAMPAIGN})
def view_ng_report(request, display_name, year, month, day=None, id=None):
if not day and not id:
url = reverse('list_ng_reports_rep', kwargs={'rep': display_name})
return redirect(urlparams(url, year=year, month=month))
user = get_object_or_404(User, userprofile__display_name=display_name)
report = get_object_or_404(NGReport, id=id)
comment_form = forms.NGReportCommentForm()
verification_form = forms.NGVerifyReportForm(instance=report)
editable = False
if (((request.user == user or request.user.has_perm('change_ngreport')) and
(report.activity.name not in UNLISTED_ACTIVITIES))):
editable = True
ctx_data = {'pageuser': user,
'user_profile': user.userprofile,
'report': report,
'editable': editable,
'comment_form': comment_form,
'verification_form': verification_form}
template = 'view_ng_report.jinja'
if request.method == 'POST':
# Process comment form
if 'comment' in request.POST:
comment_form = forms.NGReportCommentForm(request.POST)
if comment_form.is_valid():
if not request.user.is_authenticated():
messages.error(request, 'Permission denied.')
return redirect('main')
obj = comment_form.save(commit=False)
obj.user = request.user
obj.report = report
obj.save()
messages.success(request, 'Comment saved successfully.')
statsd.incr('reports.create_comment')
ctx_data['comment_form'] = forms.NGReportCommentForm()
# Process verification form
else:
verification_form = forms.NGVerifyReportForm(request.POST, instance=report)
if verification_form.is_valid():
if ((not request.user.is_authenticated()) or
(not request.user.groups.filter(
Q(name='Council') | Q(name='Mentor')).exists())):
messages.error(request, 'Permission denied.')
return redirect('main')
if verification_form.cleaned_data['verified_activity']:
messages.success(request, u'Activity verified successfully.')
else:
                    messages.success(request, u'Activity invalidated successfully.')
verification_form.save()
ctx_data['verification_form'] = forms.NGVerifyReportForm(instance=report)
return render(request, template, ctx_data)
@never_cache
@permission_check(permissions=['reports.delete_ngreport'],
filter_field='display_name', owner_field='user', model=UserProfile)
def delete_ng_report(request, display_name, year, month, day, id):
user = get_object_or_404(User, userprofile__display_name=display_name)
if request.method == 'POST':
report = get_object_or_404(NGReport, id=id)
report.delete()
messages.success(request, 'Report successfully deleted.')
statsd.incr('reports.delete_report')
if request.user == user:
return redirect('profiles_view_my_profile')
return redirect('profiles_view_profile', display_name=display_name)
@permission_check(permissions=['reports.delete_ngreportcomment'],
filter_field='display_name', owner_field='user', model=UserProfile)
def delete_ng_report_comment(request, display_name, year, month, day, id, comment_id):
report = get_object_or_404(NGReport, pk=id)
if comment_id and request.method == 'POST':
report_comment = get_object_or_404(NGReportComment, pk=comment_id)
report_comment.delete()
messages.success(request, 'Comment successfully deleted.')
statsd.incr('reports.delete_comment')
return redirect(report.get_absolute_url())
def list_ng_reports(request, mentor=None, rep=None, functional_area_slug=None):
today = now().date()
report_list = NGReport.objects.filter(report_date__lte=today)
pageheader = 'Activities for Reps'
user = None
pageuser_is_mentor = False
if mentor or rep:
user = get_object_or_404(User, userprofile__display_name__iexact=mentor or rep)
if mentor:
report_list = report_list.filter(mentor=user)
pageheader += ' mentored by %s' % user.get_full_name()
pageuser_is_mentor = True
elif rep:
report_list = report_list.filter(user=user)
pageheader = 'Activities for %s' % user.get_full_name()
if functional_area_slug:
functional_area = get_object_or_404(FunctionalArea, slug=functional_area_slug)
report_list = report_list.filter(functional_areas=functional_area)
pageheader += ' for area %s' % functional_area.name
month = request.GET.get('month')
year = request.GET.get('year')
if month and year:
try:
month = month2number(month)
# Make sure that year is an integer too
year = int(year)
except (TypeError, ValueError):
raise Http404()
report_list = report_list.filter(report_date__year=year, report_date__month=month)
if 'query' in request.GET:
query = request.GET['query'].strip()
report_list = report_list.filter(
Q(ngreportcomment__comment__icontains=query) |
Q(activity__name__icontains=query) |
Q(activity_description__icontains=query) |
Q(campaign__name__icontains=query) |
Q(functional_areas__name__icontains=query) |
Q(location__icontains=query) |
Q(link__icontains=query) |
Q(link_description__icontains=query) |
Q(user__first_name__icontains=query) |
Q(user__last_name__icontains=query) |
Q(user__userprofile__local_name__icontains=query) |
Q(user__userprofile__display_name__icontains=query) |
Q(mentor__first_name__icontains=query) |
Q(mentor__last_name__icontains=query) |
Q(mentor__userprofile__local_name__icontains=query) |
Q(mentor__userprofile__display_name__icontains=query))
report_list = report_list.distinct()
number_of_reports = report_list.count()
sort_key = request.GET.get('sort_key', LIST_NG_REPORTS_DEFAULT_SORT)
if sort_key not in LIST_NG_REPORTS_VALID_SORTS:
sort_key = LIST_NG_REPORTS_DEFAULT_SORT
sort_by = LIST_NG_REPORTS_VALID_SORTS[sort_key]
report_list = report_list.order_by(*sort_by.split(','))
paginator = Paginator(report_list, settings.ITEMS_PER_PAGE)
# Make sure page request is an int. If not, deliver first page.
try:
page = int(request.GET.get('page', '1'))
except ValueError:
page = 1
# If page request (9999) is out of range, deliver last page of results.
try:
reports = paginator.page(page)
except (EmptyPage, InvalidPage):
reports = paginator.page(paginator.num_pages)
return render(request, 'list_ng_reports.jinja',
{'objects': reports,
'number_of_reports': number_of_reports,
'sort_key': sort_key,
'pageheader': pageheader,
'pageuser': user,
'pageuser_is_mentor': pageuser_is_mentor,
'query': request.GET.get('query', '')})
| bsd-3-clause |
kybriainfotech/iSocioCRM | addons/account_analytic_plans/wizard/account_crossovered_analytic.py | 341 | 2972 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class account_crossovered_analytic(osv.osv_memory):
_name = "account.crossovered.analytic"
_description = "Print Crossovered Analytic"
_columns = {
'date1': fields.date('Start Date', required=True),
'date2': fields.date('End Date', required=True),
'journal_ids': fields.many2many('account.analytic.journal', 'crossovered_journal_rel', 'crossover_id', 'journal_id', 'Analytic Journal'),
'ref': fields.many2one('account.analytic.account', 'Analytic Account Reference', required=True),
        'empty_line': fields.boolean("Don't show empty lines"),
}
_defaults = {
'date1': lambda *a: time.strftime('%Y-01-01'),
'date2': lambda *a: time.strftime('%Y-%m-%d'),
}
def print_report(self, cr, uid, ids, context=None):
cr.execute('SELECT account_id FROM account_analytic_line')
res = cr.fetchall()
acc_ids = [x[0] for x in res]
data = self.read(cr, uid, ids, context=context)[0]
data['ref'] = data['ref'][0]
obj_acc = self.pool.get('account.analytic.account').browse(cr, uid, data['ref'], context=context)
name = obj_acc.name
account_ids = self.pool.get('account.analytic.account').search(cr, uid, [('parent_id', 'child_of', [data['ref']])], context=context)
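        # Descriptive note (added): 'flag' stays True only when neither the
        # selected analytic account nor any of its children has analytic
        # lines, in which case the report is refused below.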
flag = True
for acc in account_ids:
if acc in acc_ids:
flag = False
break
if flag:
            raise osv.except_osv(_('User Error!'), _('There are no analytic lines related to account %s.') % name)
datas = {
'ids': [],
'model': 'account.analytic.account',
'form': data
}
return self.pool['report'].get_action(cr, uid, [], 'account_analytic_plans.report_crossoveredanalyticplans', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
anbangleo/NlsdeWeb | Python-3.6.0/Lib/idlelib/idle_test/test_paragraph.py | 7 | 14314 | # Test the functions and main class method of paragraph.py
import unittest
from idlelib import paragraph as fp
from idlelib.editor import EditorWindow
from tkinter import Tk, Text
from test.support import requires
class Is_Get_Test(unittest.TestCase):
"""Test the is_ and get_ functions"""
test_comment = '# This is a comment'
test_nocomment = 'This is not a comment'
trailingws_comment = '# This is a comment '
leadingws_comment = ' # This is a comment'
leadingws_nocomment = ' This is not a comment'
def test_is_all_white(self):
self.assertTrue(fp.is_all_white(''))
self.assertTrue(fp.is_all_white('\t\n\r\f\v'))
self.assertFalse(fp.is_all_white(self.test_comment))
def test_get_indent(self):
Equal = self.assertEqual
Equal(fp.get_indent(self.test_comment), '')
Equal(fp.get_indent(self.trailingws_comment), '')
Equal(fp.get_indent(self.leadingws_comment), ' ')
Equal(fp.get_indent(self.leadingws_nocomment), ' ')
def test_get_comment_header(self):
Equal = self.assertEqual
# Test comment strings
Equal(fp.get_comment_header(self.test_comment), '#')
Equal(fp.get_comment_header(self.trailingws_comment), '#')
Equal(fp.get_comment_header(self.leadingws_comment), ' #')
# Test non-comment strings
Equal(fp.get_comment_header(self.leadingws_nocomment), ' ')
Equal(fp.get_comment_header(self.test_nocomment), '')
class FindTest(unittest.TestCase):
"""Test the find_paragraph function in paragraph module.
Using the runcase() function, find_paragraph() is called with 'mark' set at
multiple indexes before and inside the test paragraph.
It appears that code with the same indentation as a quoted string is grouped
as part of the same paragraph, which is probably incorrect behavior.
"""
@classmethod
def setUpClass(cls):
from idlelib.idle_test.mock_tk import Text
cls.text = Text()
def runcase(self, inserttext, stopline, expected):
# Check that find_paragraph returns the expected paragraph when
# the mark index is set to beginning, middle, end of each line
# up to but not including the stop line
text = self.text
text.insert('1.0', inserttext)
for line in range(1, stopline):
linelength = int(text.index("%d.end" % line).split('.')[1])
for col in (0, linelength//2, linelength):
tempindex = "%d.%d" % (line, col)
self.assertEqual(fp.find_paragraph(text, tempindex), expected)
text.delete('1.0', 'end')
def test_find_comment(self):
comment = (
"# Comment block with no blank lines before\n"
"# Comment line\n"
"\n")
self.runcase(comment, 3, ('1.0', '3.0', '#', comment[0:58]))
comment = (
"\n"
"# Comment block with whitespace line before and after\n"
"# Comment line\n"
"\n")
self.runcase(comment, 4, ('2.0', '4.0', '#', comment[1:70]))
comment = (
"\n"
" # Indented comment block with whitespace before and after\n"
" # Comment line\n"
"\n")
self.runcase(comment, 4, ('2.0', '4.0', ' #', comment[1:82]))
comment = (
"\n"
"# Single line comment\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:23]))
comment = (
"\n"
" # Single line comment with leading whitespace\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:51]))
comment = (
"\n"
"# Comment immediately followed by code\n"
"x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:40]))
comment = (
"\n"
" # Indented comment immediately followed by code\n"
"x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', ' #', comment[1:53]))
comment = (
"\n"
"# Comment immediately followed by indented code\n"
" x = 42\n"
"\n")
self.runcase(comment, 3, ('2.0', '3.0', '#', comment[1:49]))
def test_find_paragraph(self):
teststring = (
'"""String with no blank lines before\n'
'String line\n'
'"""\n'
'\n')
self.runcase(teststring, 4, ('1.0', '4.0', '', teststring[0:53]))
teststring = (
"\n"
'"""String with whitespace line before and after\n'
'String line.\n'
'"""\n'
'\n')
self.runcase(teststring, 5, ('2.0', '5.0', '', teststring[1:66]))
teststring = (
'\n'
' """Indented string with whitespace before and after\n'
' Comment string.\n'
' """\n'
'\n')
self.runcase(teststring, 5, ('2.0', '5.0', ' ', teststring[1:85]))
teststring = (
'\n'
'"""Single line string."""\n'
'\n')
self.runcase(teststring, 3, ('2.0', '3.0', '', teststring[1:27]))
teststring = (
'\n'
' """Single line string with leading whitespace."""\n'
'\n')
self.runcase(teststring, 3, ('2.0', '3.0', ' ', teststring[1:55]))
class ReformatFunctionTest(unittest.TestCase):
"""Test the reformat_paragraph function without the editor window."""
def test_reformat_paragraph(self):
Equal = self.assertEqual
reform = fp.reformat_paragraph
hw = "O hello world"
Equal(reform(' ', 1), ' ')
Equal(reform("Hello world", 20), "Hello world")
# Test without leading newline
Equal(reform(hw, 1), "O\nhello\nworld")
Equal(reform(hw, 6), "O\nhello\nworld")
Equal(reform(hw, 7), "O hello\nworld")
Equal(reform(hw, 12), "O hello\nworld")
Equal(reform(hw, 13), "O hello world")
# Test with leading newline
hw = "\nO hello world"
Equal(reform(hw, 1), "\nO\nhello\nworld")
Equal(reform(hw, 6), "\nO\nhello\nworld")
Equal(reform(hw, 7), "\nO hello\nworld")
Equal(reform(hw, 12), "\nO hello\nworld")
Equal(reform(hw, 13), "\nO hello world")
class ReformatCommentTest(unittest.TestCase):
"""Test the reformat_comment function without the editor window."""
def test_reformat_comment(self):
Equal = self.assertEqual
# reformat_comment formats to a minimum of 20 characters
test_string = (
" \"\"\"this is a test of a reformat for a triple quoted string"
" will it reformat to less than 70 characters for me?\"\"\"")
result = fp.reformat_comment(test_string, 70, " ")
expected = (
" \"\"\"this is a test of a reformat for a triple quoted string will it\n"
" reformat to less than 70 characters for me?\"\"\"")
Equal(result, expected)
test_comment = (
"# this is a test of a reformat for a triple quoted string will "
"it reformat to less than 70 characters for me?")
result = fp.reformat_comment(test_comment, 70, "#")
expected = (
"# this is a test of a reformat for a triple quoted string will it\n"
"# reformat to less than 70 characters for me?")
Equal(result, expected)
class FormatClassTest(unittest.TestCase):
def test_init_close(self):
instance = fp.FormatParagraph('editor')
self.assertEqual(instance.editwin, 'editor')
instance.close()
self.assertEqual(instance.editwin, None)
# For testing format_paragraph_event, initialize FormatParagraph with
# a mock Editor with .text and .get_selection_indices. The text must
# be a Text wrapper that adds two methods.
# A real EditorWindow creates unneeded, time-consuming baggage and
# sometimes emits shutdown warnings like this:
# "warning: callback failed in WindowList <class '_tkinter.TclError'>
# : invalid command name ".55131368.windows".
# Calling EditorWindow._close in tearDownClass prevents this but causes
# other problems (windows left open).
class TextWrapper:
def __init__(self, master):
self.text = Text(master=master)
def __getattr__(self, name):
return getattr(self.text, name)
def undo_block_start(self): pass
def undo_block_stop(self): pass
class Editor:
def __init__(self, root):
self.text = TextWrapper(root)
    get_selection_indices = EditorWindow.get_selection_indices
class FormatEventTest(unittest.TestCase):
"""Test the formatting of text inside a Text widget.
This is done with FormatParagraph.format.paragraph_event,
which calls functions in the module as appropriate.
"""
test_string = (
" '''this is a test of a reformat for a triple "
"quoted string will it reformat to less than 70 "
"characters for me?'''\n")
multiline_test_string = (
" '''The first line is under the max width.\n"
" The second line's length is way over the max width. It goes "
"on and on until it is over 100 characters long.\n"
" Same thing with the third line. It is also way over the max "
"width, but FormatParagraph will fix it.\n"
" '''\n")
multiline_test_comment = (
"# The first line is under the max width.\n"
"# The second line's length is way over the max width. It goes on "
"and on until it is over 100 characters long.\n"
"# Same thing with the third line. It is also way over the max "
"width, but FormatParagraph will fix it.\n"
"# The fourth line is short like the first line.")
@classmethod
def setUpClass(cls):
requires('gui')
cls.root = Tk()
editor = Editor(root=cls.root)
cls.text = editor.text.text # Test code does not need the wrapper.
cls.formatter = fp.FormatParagraph(editor).format_paragraph_event
# Sets the insert mark just after the re-wrapped and inserted text.
@classmethod
def tearDownClass(cls):
del cls.text, cls.formatter
cls.root.destroy()
del cls.root
def test_short_line(self):
self.text.insert('1.0', "Short line\n")
self.formatter("Dummy")
self.assertEqual(self.text.get('1.0', 'insert'), "Short line\n" )
self.text.delete('1.0', 'end')
def test_long_line(self):
text = self.text
# Set cursor ('insert' mark) to '1.0', within text.
text.insert('1.0', self.test_string)
text.mark_set('insert', '1.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
# find function includes \n
expected = (
" '''this is a test of a reformat for a triple quoted string will it\n"
" reformat to less than 70 characters for me?'''\n") # yes
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# Select from 1.11 to line end.
text.insert('1.0', self.test_string)
text.tag_add('sel', '1.11', '1.end')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
# selection excludes \n
expected = (
" '''this is a test of a reformat for a triple quoted string will it reformat\n"
" to less than 70 characters for me?'''") # no
self.assertEqual(result, expected)
text.delete('1.0', 'end')
def test_multiple_lines(self):
text = self.text
# Select 2 long lines.
text.insert('1.0', self.multiline_test_string)
text.tag_add('sel', '2.0', '4.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('2.0', 'insert')
expected = (
" The second line's length is way over the max width. It goes on and\n"
" on until it is over 100 characters long. Same thing with the third\n"
" line. It is also way over the max width, but FormatParagraph will\n"
" fix it.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
def test_comment_block(self):
text = self.text
# Set cursor ('insert') to '1.0', within block.
text.insert('1.0', self.multiline_test_comment)
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
expected = (
"# The first line is under the max width. The second line's length is\n"
"# way over the max width. It goes on and on until it is over 100\n"
"# characters long. Same thing with the third line. It is also way over\n"
"# the max width, but FormatParagraph will fix it. The fourth line is\n"
"# short like the first line.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# Select line 2, verify line 1 unaffected.
text.insert('1.0', self.multiline_test_comment)
text.tag_add('sel', '2.0', '3.0')
self.formatter('ParameterDoesNothing', limit=70)
result = text.get('1.0', 'insert')
expected = (
"# The first line is under the max width.\n"
"# The second line's length is way over the max width. It goes on and\n"
"# on until it is over 100 characters long.\n")
self.assertEqual(result, expected)
text.delete('1.0', 'end')
# The following block worked with EditorWindow but fails with the mock.
# Lines 2 and 3 get pasted together even though the previous block left
# the previous line alone. More investigation is needed.
## # Select lines 3 and 4
## text.insert('1.0', self.multiline_test_comment)
## text.tag_add('sel', '3.0', '5.0')
## self.formatter('ParameterDoesNothing')
## result = text.get('3.0', 'insert')
## expected = (
##"# Same thing with the third line. It is also way over the max width,\n"
##"# but FormatParagraph will fix it. The fourth line is short like the\n"
##"# first line.\n")
## self.assertEqual(result, expected)
## text.delete('1.0', 'end')
if __name__ == '__main__':
unittest.main(verbosity=2, exit=2)
| mit |
Raul3212/Compiladores | Analisador Lexico/automato_jav.py | 2 | 15978 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from definicoes import *
from automato import *
def automatoJav():
afd = AFD(84)
    # Defining labels and final (accepting) states
afd.setTokenEstado(1, "ID")
afd.setTokenEstado(2, "IF")
afd.setTokenEstado(3, "ID")
afd.setTokenEstado(4, "ID")
afd.setTokenEstado(5, "INT")
afd.setTokenEstado(6, "ID")
afd.setTokenEstado(7, "ID")
afd.setTokenEstado(8, "ID")
afd.setTokenEstado(9, "ID")
afd.setTokenEstado(10, "ID")
afd.setTokenEstado(11, "STRING")
afd.setTokenEstado(12, "NUM")
afd.setTokenEstado(14, "STR")
afd.setTokenEstado(15, "ID")
afd.setTokenEstado(16, "ID")
afd.setTokenEstado(17, "ID")
afd.setTokenEstado(18, "ELSE")
afd.setTokenEstado(19, "LPAREN")
afd.setTokenEstado(20, "RPAREN")
afd.setTokenEstado(21, "LKEY")
afd.setTokenEstado(22, "RKEY")
afd.setTokenEstado(23, "LBRAC")
afd.setTokenEstado(24, "RBRAC")
afd.setTokenEstado(25, "PNT")
afd.setTokenEstado(26, "COMMA")
afd.setTokenEstado(27, "SEMI")
afd.setTokenEstado(28, "BINOP")
afd.setTokenEstado(29, "BINOP")
afd.setTokenEstado(30, "BINOP")
afd.setTokenEstado(31, "BINOP")
afd.setTokenEstado(32, "BINOP")
afd.setTokenEstado(33, "BINOP")
afd.setTokenEstado(34, "BINOP")
afd.setTokenEstado(35, "BINOP")
afd.setTokenEstado(36, "BINOP")
afd.setTokenEstado(37, "NOT")
afd.setTokenEstado(38, "BINOP")
afd.setTokenEstado(44, "RETURN")
afd.setTokenEstado(45, "BINOP")
afd.setTokenEstado(46, "ID")
afd.setTokenEstado(47, "ID")
afd.setTokenEstado(48, "ID")
afd.setTokenEstado(49, "ID")
afd.setTokenEstado(50, "CLASS")
afd.setTokenEstado(51, "ID")
afd.setTokenEstado(52, "ID")
afd.setTokenEstado(53, "ID")
afd.setTokenEstado(54, "ID")
afd.setTokenEstado(55, "ID")
afd.setTokenEstado(56, "PUBLIC")
afd.setTokenEstado(57, "ID")
afd.setTokenEstado(58, "ID")
afd.setTokenEstado(59, "ID")
afd.setTokenEstado(60, "ID")
afd.setTokenEstado(61, "ID")
afd.setTokenEstado(62, "PRIVATE")
afd.setTokenEstado(63, "ID")
afd.setTokenEstado(64, "ID")
afd.setTokenEstado(65, "ID")
afd.setTokenEstado(66, "ID")
afd.setTokenEstado(67, "ID")
afd.setTokenEstado(68, "ID")
afd.setTokenEstado(69, "PROTECTED")
afd.setTokenEstado(70, "ID")
afd.setTokenEstado(71, "ID")
afd.setTokenEstado(72, "ID")
afd.setTokenEstado(73, "NULL")
afd.setTokenEstado(74, "UNOP")
afd.setTokenEstado(75, "BINOP")
afd.setTokenEstado(76, "UNOP")
afd.setTokenEstado(77, "UNOP")
afd.setTokenEstado(78, "BINOP")
afd.setTokenEstado(79, "ERRO")
afd.setTokenEstado(80, "BINOP")
for i in range(84):
token_i = afd.getTokenEstado(i)
if token_i == "ID":
afd.addTransicao(i, simbolosEspeciais(), 79)
elif token_i == "NUM":
afd.addTransicao(i, letras() + simbolosEspeciais() + ['(', '{', '}', '['], 79)
elif token_i == "STR":
afd.addTransicao(i, letras() + digitos(), 79)
elif token_i == "NOT":
b = operadoresAritmeticos()
b.remove('=')
afd.addTransicao(i, caracteresEspeciais() + simbolosEspeciais() + digitos() + b, 79)
elif token_i == "UNOP":
a = caracteresEspeciais()
a.remove(';')
a.remove(',')
afd.addTransicao(i, a + simbolosEspeciais() + digitos() + operadoresAritmeticos(), 79)
elif token_i == "BINOP":
a = caracteresEspeciais()
a.remove('(')
b = operadoresAritmeticos()
b.remove('=')
c = simbolosEspeciais()
c.remove('"')
afd.addTransicao(i, a + b + c, 79)
elif token_i == "STRING":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "INT":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "NULL":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "PUBLIC":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "PRIVATE":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "PROTECTED":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + caracteresEspeciais(), 79)
elif token_i == "IF":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + [')', '{', '}'], 79)
elif token_i == "ELSE":
a = caracteresEspeciais()
a.remove('{')
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + a, 79)
elif token_i == "LPAREN":
a = caracteresEspeciais()
a.remove('(')
a.remove(')')
b = simbolosEspeciais()
b.remove('"')
c = operadoresAritmeticos()
c.remove('+')
c.remove('-')
afd.addTransicao(i, a + b + c, 79)
elif token_i == "RPAREN":
afd.addTransicao(i, simbolosEspeciais() + letras() + ['(', '['], 79)
elif token_i == "LKEY":
a = caracteresEspeciais()
a.remove('(')
b = simbolosEspeciais()
b.remove('"')
afd.addTransicao(i, operadoresAritmeticos() + a + b, 79)
elif token_i == "RKEY":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + letras() + [')', '{', ';'], 79)
elif token_i == "LBRAC":
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + [')', '{', '}'], 79)
elif token_i == "RBRAC":
afd.addTransicao(i, letras() + operadoresAritmeticos() + simbolosEspeciais() + ['(', ')', '{', '}'], 79)
elif token_i == "PNT":
afd.addTransicao(i, alfabeto(), 79)
elif token_i == "SEMI":
a = caracteresEspeciais()
a.remove('(')
b = operadoresAritmeticos()
b.remove('+')
b.remove('-')
b.remove('/')
afd.addTransicao(i, b + simbolosEspeciais() + a, 79)
elif token_i == "COMMA":
a = caracteresEspeciais()
a.remove('(')
b = operadoresAritmeticos()
b.remove('+')
b.remove('-')
c = simbolosEspeciais()
c.remove('"')
afd.addTransicao(i, a + b + c, 79)
elif token_i == "RETURN":
a = caracteresEspeciais()
a.remove('(')
afd.addTransicao(i, operadoresAritmeticos() + simbolosEspeciais() + a, 79)
    # Some transitions had to be moved to after the error-handling loop above
    # Transitions from state 0
a = letras()
a.remove('i')
a.remove('S')
a.remove('r')
a.remove('c')
a.remove('p')
a.remove('n')
afd.addTransicao(0, ['i'], 1)
    afd.addTransicao(0, a, 3)  # all letters except i, S, r, c, p, n
afd.addTransicao(0, ['S'], 6)
afd.addTransicao(0, digitos(), 12)
afd.addTransicao(0, ['"'], 13)
afd.addTransicao(0, ['e'], 15)
afd.addTransicao(0, ['('], 19)
afd.addTransicao(0, [')'], 20)
afd.addTransicao(0, ['{'], 21)
afd.addTransicao(0, ['}'], 22)
afd.addTransicao(0, ['['], 23)
afd.addTransicao(0, [']'], 24)
afd.addTransicao(0, ['.'], 25)
afd.addTransicao(0, [','], 26)
afd.addTransicao(0, [';'], 27)
afd.addTransicao(0, ['='], 28)
afd.addTransicao(0, ['+'], 30)
afd.addTransicao(0, ['&'], 31)
afd.addTransicao(0, ['|'], 33)
afd.addTransicao(0, ['<','>'], 35)
afd.addTransicao(0, ['!'], 37)
afd.addTransicao(0, ['r'], 39)
afd.addTransicao(0, ['c'], 46)
afd.addTransicao(0, ['p'], 51)
afd.addTransicao(0, ['n'], 70)
afd.addTransicao(0, ['-'], 75)
afd.addTransicao(0, ['*'], 78)
afd.addTransicao(0, ['/'], 80)
#TRANSICOES do estado 1
a = letras()
a.remove('f')
a.remove('n')
afd.addTransicao(1, ['n'], 4)
afd.addTransicao(1, ['f'], 2)
afd.addTransicao(1, a + digitos(), 3)
    # all digits and letters except f and n
#TRANSICOES do estado 2
afd.addTransicao(2, letras() + digitos(), 3)
#TRANSICOES do estado 3
afd.addTransicao(3, letras() + digitos(), 3)
#TRANSICOES do estado 4
a = letras()
a.remove('t')
afd.addTransicao(4, ['t'], 5)
afd.addTransicao(4, a + digitos(), 3)
#TRANSICOES do estado 5
afd.addTransicao(5, letras() + digitos(), 3)
#TRANSICOES do estado 6
a = letras()
a.remove('t')
afd.addTransicao(6, ['t'], 7)
afd.addTransicao(6, a + digitos(), 3)
#TRANSICOES do estado 7
a = letras()
a.remove('r')
afd.addTransicao(7, ['r'], 8)
afd.addTransicao(7, a + digitos(), 3)
#TRANSICOES do estado 8
a = letras()
a.remove('i')
afd.addTransicao(8, ['i'], 9)
afd.addTransicao(8, a + digitos(), 3)
#TRANSICOES do estado 9
a = letras()
a.remove('n')
afd.addTransicao(9, ['n'], 10)
afd.addTransicao(9, a + digitos(), 3)
#TRANSICOES do estado 10
a = letras()
a.remove('g')
afd.addTransicao(10, ['g'], 11)
afd.addTransicao(10, a + digitos(), 3)
#TRANSICOES do estado 11
afd.addTransicao(11, letras() + digitos() , 3)
#TRANSICOES do estado 12
afd.addTransicao(12, digitos(), 12)
#TRANSICOES do estado 13
a = alfabeto()
a.remove('"')
afd.addTransicao(13, ['"'], 14)
afd.addTransicao(13, a + [' '], 13)
#TRANSICOES do estado 15
a = letras()
a.remove('l')
afd.addTransicao(15, ['l'], 16)
afd.addTransicao(15, a + digitos() , 3)
#TRANSICOES do estado 16
a = letras()
a.remove('s')
afd.addTransicao(16, ['s'], 17)
afd.addTransicao(16, a + digitos() , 3)
#TRANSICOES do estado 17
a = letras()
a.remove('e')
afd.addTransicao(17, ['e'], 18)
afd.addTransicao(17, a + digitos() , 3)
#TRANSICOES do estado 18
afd.addTransicao(18, letras() + digitos() , 3)
#TRANSICOES do estado 31
afd.addTransicao(31, ['&'], 32)
#TRANSICOES do estado 33
afd.addTransicao(33, ['|'], 34)
#TRANSICOES do estado 35
afd.addTransicao(35, ['='], 36)
#TRANSICOES do estado 37
afd.addTransicao(37, ['='], 38)
#TRANSICOES do estado 39
a = letras()
a.remove('e')
afd.addTransicao(39, ['e'], 40)
afd.addTransicao(39, a + digitos() , 3)
#TRANSICOES do estado 40
a = letras()
a.remove('t')
afd.addTransicao(40, ['t'], 41)
afd.addTransicao(40, a + digitos() , 3)
#TRANSICOES do estado 41
a = letras()
a.remove('u')
afd.addTransicao(41, ['u'], 42)
afd.addTransicao(41, a + digitos() , 3)
#TRANSICOES do estado 42
a = letras()
a.remove('r')
afd.addTransicao(42, ['r'], 43)
afd.addTransicao(42, a + digitos() , 3)
#TRANSICOES do estado 43
a = letras()
a.remove('n')
afd.addTransicao(43, ['n'], 44)
afd.addTransicao(43, a + digitos() , 3)
#TRANSICOES do estado 44
afd.addTransicao(44, letras() + digitos() , 3)
#TRANSICOES do estado 46
a = letras()
a.remove('l')
afd.addTransicao(46, ['l'], 47)
afd.addTransicao(46, a + digitos() , 3)
#TRANSICOES do estado 47
a = letras()
a.remove('a')
afd.addTransicao(47, ['a'], 48)
afd.addTransicao(47, a + digitos() , 3)
#TRANSICOES do estado 48
a = letras()
a.remove('s')
afd.addTransicao(48, ['s'], 49)
afd.addTransicao(48, a + digitos() , 3)
#TRANSICOES do estado 49
a = letras()
a.remove('s')
afd.addTransicao(49, ['s'], 50)
afd.addTransicao(49, a + digitos() , 3)
#TRANSICOES do estado 50
afd.addTransicao(50, letras() + digitos() , 3)
#TRANSICOES do estado 51
a = letras()
a.remove('u')
a.remove('r')
afd.addTransicao(51, ['u'], 52)
afd.addTransicao(51, ['r'], 57)
afd.addTransicao(51, a + digitos() , 3)
#TRANSICOES do estado 52
a = letras()
a.remove('b')
afd.addTransicao(52, ['b'], 53)
afd.addTransicao(52, a + digitos() , 3)
#TRANSICOES do estado 53
a = letras()
a.remove('l')
afd.addTransicao(53, ['l'], 54)
afd.addTransicao(53, a + digitos() , 3)
#TRANSICOES do estado 54
a = letras()
a.remove('i')
afd.addTransicao(54, ['i'], 55)
afd.addTransicao(54, a + digitos() , 3)
#TRANSICOES do estado 55
a = letras()
a.remove('c')
afd.addTransicao(55, ['c'], 56)
afd.addTransicao(55, a + digitos() , 3)
#TRANSICOES do estado 56
afd.addTransicao(56, letras() + digitos() , 3)
#TRANSICOES do estado 57
a = letras()
a.remove('i')
a.remove('o')
afd.addTransicao(57, ['i'], 58)
afd.addTransicao(57, ['o'], 62)
afd.addTransicao(57, a + digitos() , 3)
#TRANSICOES do estado 58
a = letras()
a.remove('v')
afd.addTransicao(58, ['v'], 59)
afd.addTransicao(58, a + digitos() , 3)
#TRANSICOES do estado 59
a = letras()
a.remove('a')
afd.addTransicao(59, ['a'], 60)
afd.addTransicao(59, a + digitos() , 3)
#TRANSICOES do estado 60
a = letras()
a.remove('t')
afd.addTransicao(60, ['t'], 61)
afd.addTransicao(60, a + digitos() , 3)
#TRANSICOES do estado 61
a = letras()
a.remove('e')
afd.addTransicao(61, ['e'], 62)
afd.addTransicao(61, a + digitos() , 3)
#TRANSICOES do estado 62
afd.addTransicao(62, letras() + digitos() , 3)
#TRANSICOES do estado 63
a = letras()
a.remove('t')
afd.addTransicao(63, ['t'], 64)
afd.addTransicao(63, a + digitos() , 3)
#TRANSICOES do estado 64
a = letras()
a.remove('e')
afd.addTransicao(64, ['e'], 65)
afd.addTransicao(64, a + digitos() , 3)
#TRANSICOES do estado 65
a = letras()
a.remove('c')
afd.addTransicao(65, ['c'], 66)
afd.addTransicao(65, a + digitos() , 3)
#TRANSICOES do estado 66
a = letras()
a.remove('t')
afd.addTransicao(66, ['t'], 67)
afd.addTransicao(66, a + digitos() , 3)
#TRANSICOES do estado 67
a = letras()
a.remove('e')
afd.addTransicao(67, ['e'], 68)
afd.addTransicao(67, a + digitos() , 3)
#TRANSICOES do estado 68
a = letras()
a.remove('d')
afd.addTransicao(68, ['d'], 69)
afd.addTransicao(68, a + digitos() , 3)
#TRANSICOES do estado 69
afd.addTransicao(69, letras() + digitos() , 3)
#TRANSICOES do estado 70
a = letras()
a.remove('u')
afd.addTransicao(70, ['u'], 71)
afd.addTransicao(70, a + digitos() , 3)
#TRANSICOES do estado 71
a = letras()
a.remove('l')
afd.addTransicao(71, ['l'], 72)
afd.addTransicao(71, a + digitos() , 3)
#TRANSICOES do estado 72
a = letras()
a.remove('l')
afd.addTransicao(72, ['l'], 73)
afd.addTransicao(72, a + digitos() , 3)
#TRANSICOES do estado 73
afd.addTransicao(73, letras() + digitos() , 3)
#TRANSICOES do estado 81
afd.addTransicao(81, alfabeto() + [' '], 81)
afd.addTransicao(81, ['\n'], 0)
#TRANSICOES do estado 82
afd.addTransicao(82, alfabeto() + [' '], 82)
afd.addTransicao(82, ['*'], 83)
#TRANSICOES do estado 83
a = alfabeto()
a.remove('/')
afd.addTransicao(83, ['/'], 0)
afd.addTransicao(83, a + [' '], 82)
#TRANSICOES do estado 28
afd.addTransicao(28, ['='], 29)
#TRANSICOES do estado 30
afd.addTransicao(30, ['='], 45)
afd.addTransicao(30, ['+'], 74)
#TRANSICOES do estado 75
afd.addTransicao(75, ['='], 45)
afd.addTransicao(75, ['-'], 76)
#TRANSICOES do estado 78
afd.addTransicao(78, ['='], 45)
#TRANSICOES do estado 80
afd.addTransicao(80, ['/'], 81)
afd.addTransicao(80, ['*'], 82)
return afd
| gpl-3.0 |
lzjever/django-guardian | guardian/tests/custompkmodel_test.py | 12 | 1154 |
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from guardian.shortcuts import assign, remove_perm
from guardian.models import User, Group, Permission, AnonymousUser
class CustomPKModelTest(TestCase):
"""
    Tests against a custom model with a primary key other than the *standard*
    ``id`` integer field.
"""
def setUp(self):
self.user = User.objects.create(username='joe')
self.ctype = ContentType.objects.create(name='foo', model='bar',
app_label='fake-for-guardian-tests')
def test_assign(self):
assign('contenttypes.change_contenttype', self.user, self.ctype)
self.assertTrue(self.user.has_perm('contenttypes.change_contenttype',
self.ctype))
def test_remove_perm(self):
assign('contenttypes.change_contenttype', self.user, self.ctype)
self.assertTrue(self.user.has_perm('contenttypes.change_contenttype',
self.ctype))
remove_perm('contenttypes.change_contenttype', self.user, self.ctype)
self.assertFalse(self.user.has_perm('contenttypes.change_contenttype',
self.ctype))
| bsd-2-clause |
fqez/JdeRobot | src/drivers/MAVLinkServer/MAVProxy/modules/mavproxy_misseditor/me_event.py | 14 | 1762 | #!/usr/bin/env python
'''
Event class and enums for Mission Editor
Michael Day
June 2014
'''
#MissionEditorEvents come FROM the GUI (with a few exceptions where the Mission Editor Module sends a message to itself, e.g., MEE_TIME_TO_QUIT)
#MissionEditorGUIEvents go TO the GUI
#enum for MissionEditorEvent types
MEE_READ_WPS = 0
MEE_WRITE_WPS = 1
MEE_TIME_TO_QUIT = 2
MEE_GET_WP_RAD = 3
MEE_GET_LOIT_RAD = 4
MEE_GET_WP_DEFAULT_ALT = 5
MEE_WRITE_WP_NUM = 6
MEE_LOAD_WP_FILE = 7
MEE_SAVE_WP_FILE = 8
MEE_SET_WP_RAD = 9
MEE_SET_LOIT_RAD = 10
MEE_SET_WP_DEFAULT_ALT = 11
#enum of MissionEditorGUIEvent types
MEGE_CLEAR_MISS_TABLE = 0
MEGE_ADD_MISS_TABLE_ROWS = 1
MEGE_SET_MISS_ITEM = 2
MEGE_SET_WP_RAD = 3
MEGE_SET_LOIT_RAD = 4
MEGE_SET_WP_DEFAULT_ALT = 5
MEGE_SET_LAST_MAP_CLICK_POS = 6
class MissionEditorEvent:
def __init__(self, type, **kwargs):
self.type = type
self.arg_dict = kwargs
if not self.type in [MEE_READ_WPS, MEE_WRITE_WPS, MEGE_CLEAR_MISS_TABLE,
MEGE_ADD_MISS_TABLE_ROWS, MEGE_SET_MISS_ITEM, MEE_TIME_TO_QUIT,
MEE_GET_WP_RAD, MEE_GET_LOIT_RAD, MEGE_SET_WP_RAD, MEGE_SET_LOIT_RAD,
MEE_GET_WP_DEFAULT_ALT, MEGE_SET_WP_DEFAULT_ALT, MEE_WRITE_WP_NUM,
MEE_LOAD_WP_FILE, MEE_SAVE_WP_FILE, MEE_SET_WP_RAD, MEE_SET_LOIT_RAD,
MEE_SET_WP_DEFAULT_ALT]:
raise TypeError("Unrecongized MissionEditorEvent type:" + str(self.type))
def get_type(self):
return self.type
def get_arg(self, key):
if not key in self.arg_dict:
print("No key %s in %s" % (key, str(self.type)))
return None
return self.arg_dict[key]
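# Hedged example (added; the keyword name 'wp_num' is hypothetical, chosen
# only for illustration): event payloads travel in **kwargs and are read
# back with get_arg().
#
#   evt = MissionEditorEvent(MEE_WRITE_WP_NUM, wp_num=3)
#   evt.get_type()          # -> MEE_WRITE_WP_NUM
#   evt.get_arg('wp_num')   # -> 3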
| gpl-3.0 |
WillisXChen/django-oscar | oscar/lib/python2.7/site-packages/unidecode/x094.py | 252 | 4661 | data = (
'Kui ', # 0x00
'Si ', # 0x01
'Liu ', # 0x02
'Nao ', # 0x03
'Heng ', # 0x04
'Pie ', # 0x05
'Sui ', # 0x06
'Fan ', # 0x07
'Qiao ', # 0x08
'Quan ', # 0x09
'Yang ', # 0x0a
'Tang ', # 0x0b
'Xiang ', # 0x0c
'Jue ', # 0x0d
'Jiao ', # 0x0e
'Zun ', # 0x0f
'Liao ', # 0x10
'Jie ', # 0x11
'Lao ', # 0x12
'Dui ', # 0x13
'Tan ', # 0x14
'Zan ', # 0x15
'Ji ', # 0x16
'Jian ', # 0x17
'Zhong ', # 0x18
'Deng ', # 0x19
'Ya ', # 0x1a
'Ying ', # 0x1b
'Dui ', # 0x1c
'Jue ', # 0x1d
'Nou ', # 0x1e
'Ti ', # 0x1f
'Pu ', # 0x20
'Tie ', # 0x21
'[?] ', # 0x22
'[?] ', # 0x23
'Ding ', # 0x24
'Shan ', # 0x25
'Kai ', # 0x26
'Jian ', # 0x27
'Fei ', # 0x28
'Sui ', # 0x29
'Lu ', # 0x2a
'Juan ', # 0x2b
'Hui ', # 0x2c
'Yu ', # 0x2d
'Lian ', # 0x2e
'Zhuo ', # 0x2f
'Qiao ', # 0x30
'Qian ', # 0x31
'Zhuo ', # 0x32
'Lei ', # 0x33
'Bi ', # 0x34
'Tie ', # 0x35
'Huan ', # 0x36
'Ye ', # 0x37
'Duo ', # 0x38
'Guo ', # 0x39
'Dang ', # 0x3a
'Ju ', # 0x3b
'Fen ', # 0x3c
'Da ', # 0x3d
'Bei ', # 0x3e
'Yi ', # 0x3f
'Ai ', # 0x40
'Zong ', # 0x41
'Xun ', # 0x42
'Diao ', # 0x43
'Zhu ', # 0x44
'Heng ', # 0x45
'Zhui ', # 0x46
'Ji ', # 0x47
'Nie ', # 0x48
'Ta ', # 0x49
'Huo ', # 0x4a
'Qing ', # 0x4b
'Bin ', # 0x4c
'Ying ', # 0x4d
'Kui ', # 0x4e
'Ning ', # 0x4f
'Xu ', # 0x50
'Jian ', # 0x51
'Jian ', # 0x52
'Yari ', # 0x53
'Cha ', # 0x54
'Zhi ', # 0x55
'Mie ', # 0x56
'Li ', # 0x57
'Lei ', # 0x58
'Ji ', # 0x59
'Zuan ', # 0x5a
'Kuang ', # 0x5b
'Shang ', # 0x5c
'Peng ', # 0x5d
'La ', # 0x5e
'Du ', # 0x5f
'Shuo ', # 0x60
'Chuo ', # 0x61
'Lu ', # 0x62
'Biao ', # 0x63
'Bao ', # 0x64
'Lu ', # 0x65
'[?] ', # 0x66
'[?] ', # 0x67
'Long ', # 0x68
'E ', # 0x69
'Lu ', # 0x6a
'Xin ', # 0x6b
'Jian ', # 0x6c
'Lan ', # 0x6d
'Bo ', # 0x6e
'Jian ', # 0x6f
'Yao ', # 0x70
'Chan ', # 0x71
'Xiang ', # 0x72
'Jian ', # 0x73
'Xi ', # 0x74
'Guan ', # 0x75
'Cang ', # 0x76
'Nie ', # 0x77
'Lei ', # 0x78
'Cuan ', # 0x79
'Qu ', # 0x7a
'Pan ', # 0x7b
'Luo ', # 0x7c
'Zuan ', # 0x7d
'Luan ', # 0x7e
'Zao ', # 0x7f
'Nie ', # 0x80
'Jue ', # 0x81
'Tang ', # 0x82
'Shu ', # 0x83
'Lan ', # 0x84
'Jin ', # 0x85
'Qiu ', # 0x86
'Yi ', # 0x87
'Zhen ', # 0x88
'Ding ', # 0x89
'Zhao ', # 0x8a
'Po ', # 0x8b
'Diao ', # 0x8c
'Tu ', # 0x8d
'Qian ', # 0x8e
'Chuan ', # 0x8f
'Shan ', # 0x90
'Ji ', # 0x91
'Fan ', # 0x92
'Diao ', # 0x93
'Men ', # 0x94
'Nu ', # 0x95
'Xi ', # 0x96
'Chai ', # 0x97
'Xing ', # 0x98
'Gai ', # 0x99
'Bu ', # 0x9a
'Tai ', # 0x9b
'Ju ', # 0x9c
'Dun ', # 0x9d
'Chao ', # 0x9e
'Zhong ', # 0x9f
'Na ', # 0xa0
'Bei ', # 0xa1
'Gang ', # 0xa2
'Ban ', # 0xa3
'Qian ', # 0xa4
'Yao ', # 0xa5
'Qin ', # 0xa6
'Jun ', # 0xa7
'Wu ', # 0xa8
'Gou ', # 0xa9
'Kang ', # 0xaa
'Fang ', # 0xab
'Huo ', # 0xac
'Tou ', # 0xad
'Niu ', # 0xae
'Ba ', # 0xaf
'Yu ', # 0xb0
'Qian ', # 0xb1
'Zheng ', # 0xb2
'Qian ', # 0xb3
'Gu ', # 0xb4
'Bo ', # 0xb5
'E ', # 0xb6
'Po ', # 0xb7
'Bu ', # 0xb8
'Ba ', # 0xb9
'Yue ', # 0xba
'Zuan ', # 0xbb
'Mu ', # 0xbc
'Dan ', # 0xbd
'Jia ', # 0xbe
'Dian ', # 0xbf
'You ', # 0xc0
'Tie ', # 0xc1
'Bo ', # 0xc2
'Ling ', # 0xc3
'Shuo ', # 0xc4
'Qian ', # 0xc5
'Liu ', # 0xc6
'Bao ', # 0xc7
'Shi ', # 0xc8
'Xuan ', # 0xc9
'She ', # 0xca
'Bi ', # 0xcb
'Ni ', # 0xcc
'Pi ', # 0xcd
'Duo ', # 0xce
'Xing ', # 0xcf
'Kao ', # 0xd0
'Lao ', # 0xd1
'Er ', # 0xd2
'Mang ', # 0xd3
'Ya ', # 0xd4
'You ', # 0xd5
'Cheng ', # 0xd6
'Jia ', # 0xd7
'Ye ', # 0xd8
'Nao ', # 0xd9
'Zhi ', # 0xda
'Dang ', # 0xdb
'Tong ', # 0xdc
'Lu ', # 0xdd
'Diao ', # 0xde
'Yin ', # 0xdf
'Kai ', # 0xe0
'Zha ', # 0xe1
'Zhu ', # 0xe2
'Xian ', # 0xe3
'Ting ', # 0xe4
'Diu ', # 0xe5
'Xian ', # 0xe6
'Hua ', # 0xe7
'Quan ', # 0xe8
'Sha ', # 0xe9
'Jia ', # 0xea
'Yao ', # 0xeb
'Ge ', # 0xec
'Ming ', # 0xed
'Zheng ', # 0xee
'Se ', # 0xef
'Jiao ', # 0xf0
'Yi ', # 0xf1
'Chan ', # 0xf2
'Chong ', # 0xf3
'Tang ', # 0xf4
'An ', # 0xf5
'Yin ', # 0xf6
'Ru ', # 0xf7
'Zhu ', # 0xf8
'Lao ', # 0xf9
'Pu ', # 0xfa
'Wu ', # 0xfb
'Lai ', # 0xfc
'Te ', # 0xfd
'Lian ', # 0xfe
'Keng ', # 0xff
)
| bsd-3-clause |
cwlseu/ChineseSA | source/thirdpart/jieba/posseg/__init__.py | 8 | 9015 | from __future__ import absolute_import, unicode_literals
import os
import re
import sys
import jieba
import pickle
from .._compat import *
from .viterbi import viterbi
PROB_START_P = "prob_start.p"
PROB_TRANS_P = "prob_trans.p"
PROB_EMIT_P = "prob_emit.p"
CHAR_STATE_TAB_P = "char_state_tab.p"
re_han_detail = re.compile("([\u4E00-\u9FD5]+)")
re_skip_detail = re.compile("([\.0-9]+|[a-zA-Z0-9]+)")
re_han_internal = re.compile("([\u4E00-\u9FD5a-zA-Z0-9+#&\._]+)")
re_skip_internal = re.compile("(\r\n|\s)")
re_eng = re.compile("[a-zA-Z0-9]+")
re_num = re.compile("[\.0-9]+")
re_eng1 = re.compile('^[a-zA-Z0-9]$', re.U)
def load_model():
# For Jython
start_p = pickle.load(get_module_res("posseg", PROB_START_P))
trans_p = pickle.load(get_module_res("posseg", PROB_TRANS_P))
emit_p = pickle.load(get_module_res("posseg", PROB_EMIT_P))
state = pickle.load(get_module_res("posseg", CHAR_STATE_TAB_P))
return state, start_p, trans_p, emit_p
if sys.platform.startswith("java"):
char_state_tab_P, start_P, trans_P, emit_P = load_model()
else:
from .char_state_tab import P as char_state_tab_P
from .prob_start import P as start_P
from .prob_trans import P as trans_P
from .prob_emit import P as emit_P
class pair(object):
def __init__(self, word, flag):
self.word = word
self.flag = flag
def __unicode__(self):
return '%s/%s' % (self.word, self.flag)
def __repr__(self):
return 'pair(%r, %r)' % (self.word, self.flag)
def __str__(self):
if PY2:
return self.__unicode__().encode(default_encoding)
else:
return self.__unicode__()
def __iter__(self):
return iter((self.word, self.flag))
def __lt__(self, other):
return self.word < other.word
def __eq__(self, other):
return isinstance(other, pair) and self.word == other.word and self.flag == other.flag
def __hash__(self):
return hash(self.word)
def encode(self, arg):
return self.__unicode__().encode(arg)
class POSTokenizer(object):
def __init__(self, tokenizer=None):
self.tokenizer = tokenizer or jieba.Tokenizer()
self.load_word_tag(self.tokenizer.get_dict_file())
def __repr__(self):
return '<POSTokenizer tokenizer=%r>' % self.tokenizer
def __getattr__(self, name):
if name in ('cut_for_search', 'lcut_for_search', 'tokenize'):
# may be possible?
raise NotImplementedError
return getattr(self.tokenizer, name)
def initialize(self, dictionary=None):
self.tokenizer.initialize(dictionary)
self.load_word_tag(self.tokenizer.get_dict_file())
def load_word_tag(self, f):
self.word_tag_tab = {}
f_name = resolve_filename(f)
for lineno, line in enumerate(f, 1):
try:
line = line.strip().decode("utf-8")
if not line:
continue
word, _, tag = line.split(" ")
self.word_tag_tab[word] = tag
except Exception:
raise ValueError(
'invalid POS dictionary entry in %s at Line %s: %s' % (f_name, lineno, line))
f.close()
def makesure_userdict_loaded(self):
if self.tokenizer.user_word_tag_tab:
self.word_tag_tab.update(self.tokenizer.user_word_tag_tab)
self.tokenizer.user_word_tag_tab = {}
def __cut(self, sentence):
prob, pos_list = viterbi(
sentence, char_state_tab_P, start_P, trans_P, emit_P)
begin, nexti = 0, 0
for i, char in enumerate(sentence):
pos = pos_list[i][0]
if pos == 'B':
begin = i
elif pos == 'E':
yield pair(sentence[begin:i + 1], pos_list[i][1])
nexti = i + 1
elif pos == 'S':
yield pair(char, pos_list[i][1])
nexti = i + 1
if nexti < len(sentence):
yield pair(sentence[nexti:], pos_list[nexti][1])
def __cut_detail(self, sentence):
blocks = re_han_detail.split(sentence)
for blk in blocks:
if re_han_detail.match(blk):
for word in self.__cut(blk):
yield word
else:
tmp = re_skip_detail.split(blk)
for x in tmp:
if x:
if re_num.match(x):
yield pair(x, 'm')
elif re_eng.match(x):
yield pair(x, 'eng')
else:
yield pair(x, 'x')
def __cut_DAG_NO_HMM(self, sentence):
DAG = self.tokenizer.get_DAG(sentence)
route = {}
self.tokenizer.calc(sentence, DAG, route)
x = 0
N = len(sentence)
buf = ''
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if re_eng1.match(l_word):
buf += l_word
x = y
else:
if buf:
yield pair(buf, 'eng')
buf = ''
yield pair(l_word, self.word_tag_tab.get(l_word, 'x'))
x = y
if buf:
yield pair(buf, 'eng')
buf = ''
def __cut_DAG(self, sentence):
DAG = self.tokenizer.get_DAG(sentence)
route = {}
self.tokenizer.calc(sentence, DAG, route)
x = 0
buf = ''
N = len(sentence)
while x < N:
y = route[x][1] + 1
l_word = sentence[x:y]
if y - x == 1:
buf += l_word
else:
if buf:
if len(buf) == 1:
yield pair(buf, self.word_tag_tab.get(buf, 'x'))
elif not self.tokenizer.FREQ.get(buf):
recognized = self.__cut_detail(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield pair(elem, self.word_tag_tab.get(elem, 'x'))
buf = ''
yield pair(l_word, self.word_tag_tab.get(l_word, 'x'))
x = y
if buf:
if len(buf) == 1:
yield pair(buf, self.word_tag_tab.get(buf, 'x'))
elif not self.tokenizer.FREQ.get(buf):
recognized = self.__cut_detail(buf)
for t in recognized:
yield t
else:
for elem in buf:
yield pair(elem, self.word_tag_tab.get(elem, 'x'))
def __cut_internal(self, sentence, HMM=True):
self.makesure_userdict_loaded()
sentence = strdecode(sentence)
blocks = re_han_internal.split(sentence)
if HMM:
cut_blk = self.__cut_DAG
else:
cut_blk = self.__cut_DAG_NO_HMM
for blk in blocks:
if re_han_internal.match(blk):
for word in cut_blk(blk):
yield word
else:
tmp = re_skip_internal.split(blk)
for x in tmp:
if re_skip_internal.match(x):
yield pair(x, 'x')
else:
for xx in x:
if re_num.match(xx):
yield pair(xx, 'm')
elif re_eng.match(x):
yield pair(xx, 'eng')
else:
yield pair(xx, 'x')
def _lcut_internal(self, sentence):
return list(self.__cut_internal(sentence))
def _lcut_internal_no_hmm(self, sentence):
return list(self.__cut_internal(sentence, False))
def cut(self, sentence, HMM=True):
for w in self.__cut_internal(sentence, HMM=HMM):
yield w
def lcut(self, *args, **kwargs):
return list(self.cut(*args, **kwargs))
# default Tokenizer instance
dt = POSTokenizer(jieba.dt)
# global functions
initialize = dt.initialize
def _lcut_internal(s):
return dt._lcut_internal(s)
def _lcut_internal_no_hmm(s):
return dt._lcut_internal_no_hmm(s)
def cut(sentence, HMM=True):
"""
Global `cut` function that supports parallel processing.
Note that this only works using dt; custom POSTokenizer
instances are not supported.
"""
global dt
if jieba.pool is None:
for w in dt.cut(sentence, HMM=HMM):
yield w
else:
parts = strdecode(sentence).splitlines(True)
if HMM:
result = jieba.pool.map(_lcut_internal, parts)
else:
result = jieba.pool.map(_lcut_internal_no_hmm, parts)
for r in result:
for w in r:
yield w
def lcut(sentence, HMM=True):
return list(cut(sentence, HMM))
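# Hedged usage sketch, not part of the upstream jieba source: the module-level
# cut()/lcut() helpers above delegate to the default `dt` POSTokenizer and
# yield pair(word, flag) objects.
if __name__ == '__main__':
    # The exact segmentation and POS tags depend on the dictionary jieba
    # loads at runtime, so the printed output may vary between versions.
    for word, flag in lcut("我爱北京天安门"):
        print('%s/%s' % (word, flag))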
| apache-2.0 |
grangier/django-11599 | django/contrib/admin/templatetags/admin_list.py | 4 | 15057 | from django.conf import settings
from django.contrib.admin.views.main import ALL_VAR, EMPTY_CHANGELIST_VALUE
from django.contrib.admin.views.main import ORDER_VAR, ORDER_TYPE_VAR, PAGE_VAR, SEARCH_VAR
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.utils import dateformat
from django.utils.html import escape, conditional_escape
from django.utils.text import capfirst
from django.utils.safestring import mark_safe
from django.utils.translation import get_date_formats, get_partial_date_formats, ugettext as _
from django.utils.encoding import smart_unicode, smart_str, force_unicode
from django.template import Library
import datetime
register = Library()
DOT = '.'
def paginator_number(cl,i):
if i == DOT:
return u'... '
elif i == cl.page_num:
return mark_safe(u'<span class="this-page">%d</span> ' % (i+1))
else:
return mark_safe(u'<a href="%s"%s>%d</a> ' % (escape(cl.get_query_string({PAGE_VAR: i})), (i == cl.paginator.num_pages-1 and ' class="end"' or ''), i+1))
paginator_number = register.simple_tag(paginator_number)
def pagination(cl):
paginator, page_num = cl.paginator, cl.page_num
pagination_required = (not cl.show_all or not cl.can_show_all) and cl.multi_page
if not pagination_required:
page_range = []
else:
ON_EACH_SIDE = 3
ON_ENDS = 2
# If there are 10 or fewer pages, display links to every page.
# Otherwise, do some fancy pagination with elided page links.
if paginator.num_pages <= 10:
page_range = range(paginator.num_pages)
else:
# Insert "smart" pagination links, so that there are always ON_ENDS
# links at either end of the list of pages, and there are always
# ON_EACH_SIDE links at either end of the "current page" link.
page_range = []
if page_num > (ON_EACH_SIDE + ON_ENDS):
page_range.extend(range(0, ON_EACH_SIDE - 1))
page_range.append(DOT)
page_range.extend(range(page_num - ON_EACH_SIDE, page_num + 1))
else:
page_range.extend(range(0, page_num + 1))
if page_num < (paginator.num_pages - ON_EACH_SIDE - ON_ENDS - 1):
page_range.extend(range(page_num + 1, page_num + ON_EACH_SIDE + 1))
page_range.append(DOT)
page_range.extend(range(paginator.num_pages - ON_ENDS, paginator.num_pages))
else:
page_range.extend(range(page_num + 1, paginator.num_pages))
need_show_all_link = cl.can_show_all and not cl.show_all and cl.multi_page
return {
'cl': cl,
'pagination_required': pagination_required,
'show_all_url': need_show_all_link and cl.get_query_string({ALL_VAR: ''}),
'page_range': page_range,
'ALL_VAR': ALL_VAR,
'1': 1,
}
pagination = register.inclusion_tag('admin/pagination.html')(pagination)
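# Hedged worked example, not in the original Django source: with the constants
# above (ON_EACH_SIDE = 3, ON_ENDS = 2), paginator.num_pages = 20 and
# cl.page_num = 10, pagination() builds the 0-based page_range
#
#     [0, 1, '.', 7, 8, 9, 10, 11, 12, 13, '.', 18, 19]
#
# i.e. a couple of pages at each end, three pages on either side of the
# current page, and DOT ('.') markers where pages are elided.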
def result_headers(cl):
lookup_opts = cl.lookup_opts
for i, field_name in enumerate(cl.list_display):
attr = None
try:
f = lookup_opts.get_field(field_name)
admin_order_field = None
except models.FieldDoesNotExist:
# For non-field list_display values, check for the function
# attribute "short_description". If that doesn't exist, fall back
# to the method name. And __str__ and __unicode__ are special-cases.
if field_name == '__unicode__':
header = force_unicode(lookup_opts.verbose_name)
elif field_name == '__str__':
header = smart_str(lookup_opts.verbose_name)
else:
if callable(field_name):
attr = field_name # field_name can be a callable
else:
try:
attr = getattr(cl.model_admin, field_name)
except AttributeError:
try:
attr = getattr(cl.model, field_name)
except AttributeError:
raise AttributeError, \
"'%s' model or '%s' objects have no attribute '%s'" % \
(lookup_opts.object_name, cl.model_admin.__class__, field_name)
try:
header = attr.short_description
except AttributeError:
if callable(field_name):
header = field_name.__name__
else:
header = field_name
header = header.replace('_', ' ')
# It is a non-field, but perhaps one that is sortable
admin_order_field = getattr(attr, "admin_order_field", None)
if not admin_order_field:
yield {"text": header}
continue
# So this _is_ a sortable non-field. Go to the yield
# after the else clause.
else:
header = f.verbose_name
th_classes = []
new_order_type = 'asc'
if field_name == cl.order_field or admin_order_field == cl.order_field:
th_classes.append('sorted %sending' % cl.order_type.lower())
new_order_type = {'asc': 'desc', 'desc': 'asc'}[cl.order_type.lower()]
yield {"text": header,
"sortable": True,
"url": cl.get_query_string({ORDER_VAR: i, ORDER_TYPE_VAR: new_order_type}),
"class_attrib": mark_safe(th_classes and ' class="%s"' % ' '.join(th_classes) or '')}
def _boolean_icon(field_val):
BOOLEAN_MAPPING = {True: 'yes', False: 'no', None: 'unknown'}
return mark_safe(u'<img src="%simg/admin/icon-%s.gif" alt="%s" />' % (settings.ADMIN_MEDIA_PREFIX, BOOLEAN_MAPPING[field_val], field_val))
def items_for_result(cl, result, form):
first = True
pk = cl.lookup_opts.pk.attname
for field_name in cl.list_display:
row_class = ''
try:
f = cl.lookup_opts.get_field(field_name)
except models.FieldDoesNotExist:
# For non-field list_display values, the value is either a method,
# property or returned via a callable.
try:
if callable(field_name):
attr = field_name
value = attr(result)
elif hasattr(cl.model_admin, field_name) and \
not field_name == '__str__' and not field_name == '__unicode__':
attr = getattr(cl.model_admin, field_name)
value = attr(result)
else:
attr = getattr(result, field_name)
if callable(attr):
value = attr()
else:
value = attr
allow_tags = getattr(attr, 'allow_tags', False)
boolean = getattr(attr, 'boolean', False)
if boolean:
allow_tags = True
result_repr = _boolean_icon(value)
else:
result_repr = smart_unicode(value)
except (AttributeError, ObjectDoesNotExist):
result_repr = EMPTY_CHANGELIST_VALUE
else:
# Strip HTML tags in the resulting text, except if the
# function has an "allow_tags" attribute set to True.
if not allow_tags:
result_repr = escape(result_repr)
else:
result_repr = mark_safe(result_repr)
else:
field_val = getattr(result, f.attname)
if isinstance(f.rel, models.ManyToOneRel):
if field_val is not None:
result_repr = escape(getattr(result, f.name))
else:
result_repr = EMPTY_CHANGELIST_VALUE
# Dates and times are special: They're formatted in a certain way.
elif isinstance(f, models.DateField) or isinstance(f, models.TimeField):
if field_val:
(date_format, datetime_format, time_format) = get_date_formats()
if isinstance(f, models.DateTimeField):
result_repr = capfirst(dateformat.format(field_val, datetime_format))
elif isinstance(f, models.TimeField):
result_repr = capfirst(dateformat.time_format(field_val, time_format))
else:
result_repr = capfirst(dateformat.format(field_val, date_format))
else:
result_repr = EMPTY_CHANGELIST_VALUE
row_class = ' class="nowrap"'
# Booleans are special: We use images.
elif isinstance(f, models.BooleanField) or isinstance(f, models.NullBooleanField):
result_repr = _boolean_icon(field_val)
# DecimalFields are special: Zero-pad the decimals.
elif isinstance(f, models.DecimalField):
if field_val is not None:
result_repr = ('%%.%sf' % f.decimal_places) % field_val
else:
result_repr = EMPTY_CHANGELIST_VALUE
# Fields with choices are special: Use the representation
# of the choice.
elif f.flatchoices:
result_repr = dict(f.flatchoices).get(field_val, EMPTY_CHANGELIST_VALUE)
else:
result_repr = escape(field_val)
if force_unicode(result_repr) == '':
result_repr = mark_safe(' ')
# If list_display_links not defined, add the link tag to the first field
if (first and not cl.list_display_links) or field_name in cl.list_display_links:
table_tag = {True:'th', False:'td'}[first]
first = False
url = cl.url_for_result(result)
# Convert the pk to something that can be used in Javascript.
# Problem cases are long ints (23L) and non-ASCII strings.
if cl.to_field:
attr = str(cl.to_field)
else:
attr = pk
value = result.serializable_value(attr)
result_id = repr(force_unicode(value))[1:]
yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' % \
(table_tag, row_class, url, (cl.is_popup and ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id or ''), conditional_escape(result_repr), table_tag))
else:
# By default the fields come from ModelAdmin.list_editable, but if we pull
# the fields out of the form instead of list_editable custom admins
# can provide fields on a per request basis
if form and field_name in form.fields:
bf = form[field_name]
result_repr = mark_safe(force_unicode(bf.errors) + force_unicode(bf))
else:
result_repr = conditional_escape(result_repr)
yield mark_safe(u'<td%s>%s</td>' % (row_class, result_repr))
if form:
yield mark_safe(force_unicode(form[cl.model._meta.pk.name]))
def results(cl):
if cl.formset:
for res, form in zip(cl.result_list, cl.formset.forms):
yield list(items_for_result(cl, res, form))
else:
for res in cl.result_list:
yield list(items_for_result(cl, res, None))
def result_list(cl):
return {'cl': cl,
'result_headers': list(result_headers(cl)),
'results': list(results(cl))}
result_list = register.inclusion_tag("admin/change_list_results.html")(result_list)
def date_hierarchy(cl):
if cl.date_hierarchy:
field_name = cl.date_hierarchy
year_field = '%s__year' % field_name
month_field = '%s__month' % field_name
day_field = '%s__day' % field_name
field_generic = '%s__' % field_name
year_lookup = cl.params.get(year_field)
month_lookup = cl.params.get(month_field)
day_lookup = cl.params.get(day_field)
year_month_format, month_day_format = get_partial_date_formats()
link = lambda d: cl.get_query_string(d, [field_generic])
if year_lookup and month_lookup and day_lookup:
day = datetime.date(int(year_lookup), int(month_lookup), int(day_lookup))
return {
'show': True,
'back': {
'link': link({year_field: year_lookup, month_field: month_lookup}),
'title': dateformat.format(day, year_month_format)
},
'choices': [{'title': dateformat.format(day, month_day_format)}]
}
elif year_lookup and month_lookup:
days = cl.query_set.filter(**{year_field: year_lookup, month_field: month_lookup}).dates(field_name, 'day')
return {
'show': True,
'back': {
'link': link({year_field: year_lookup}),
'title': year_lookup
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month_lookup, day_field: day.day}),
'title': dateformat.format(day, month_day_format)
} for day in days]
}
elif year_lookup:
months = cl.query_set.filter(**{year_field: year_lookup}).dates(field_name, 'month')
return {
'show' : True,
'back': {
'link' : link({}),
'title': _('All dates')
},
'choices': [{
'link': link({year_field: year_lookup, month_field: month.month}),
'title': dateformat.format(month, year_month_format)
} for month in months]
}
else:
years = cl.query_set.dates(field_name, 'year')
return {
'show': True,
'choices': [{
'link': link({year_field: year.year}),
'title': year.year
} for year in years]
}
date_hierarchy = register.inclusion_tag('admin/date_hierarchy.html')(date_hierarchy)
def search_form(cl):
return {
'cl': cl,
'show_result_count': cl.result_count != cl.full_result_count,
'search_var': SEARCH_VAR
}
search_form = register.inclusion_tag('admin/search_form.html')(search_form)
def admin_list_filter(cl, spec):
return {'title': spec.title(), 'choices' : list(spec.choices(cl))}
admin_list_filter = register.inclusion_tag('admin/filter.html')(admin_list_filter)
def admin_actions(context):
"""
Track the number of times the action field has been rendered on the page,
so we know which value to use.
"""
context['action_index'] = context.get('action_index', -1) + 1
return context
admin_actions = register.inclusion_tag("admin/actions.html", takes_context=True)(admin_actions)
| bsd-3-clause |
ppizarror/Ned-For-Spod | bin/external/pil/OleFileIO.py | 1 | 15390 | #
# THIS IS WORK IN PROGRESS
#
# The Python Imaging Library
# $Id$
#
# stuff to deal with OLE2 Structured Storage files. this module is
# used by PIL to read Image Composer and FlashPix files, but can also
# be used to read other files of this type.
#
# History:
# 1997-01-20 fl Created
# 1997-01-22 fl Fixed 64-bit portability quirk
# 2003-09-09 fl Fixed typo in OleFileIO.loadfat (noted by Daniel Haertle)
# 2004-02-29 fl Changed long hex constants to signed integers
#
# Notes:
#
# Literature:
#
# "FlashPix Format Specification, Appendix A", Kodak and Microsoft,
# September 1996.
#
# Quotes:
#
# "If this document and functionality of the Software conflict,
# the actual functionality of the Software represents the correct
# functionality" -- Microsoft, in the OLE format specification
#
# Copyright (c) Secret Labs AB 1997.
# Copyright (c) Fredrik Lundh 1997.
#
# See the README file for information on usage and redistribution.
#
import string, StringIO
def i16(c, o = 0):
return ord(c[o])+(ord(c[o+1])<<8)
def i32(c, o = 0):
return ord(c[o])+(ord(c[o+1])<<8)+(ord(c[o+2])<<16)+(ord(c[o+3])<<24)
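# Hedged illustration, not in the original PIL source: i16 and i32 decode
# unsigned little-endian integers from a byte string, with o giving the byte
# offset at which decoding starts, e.g.
#
#     i16('\x34\x12') == 0x1234
#     i32('\x78\x56\x34\x12') == 0x12345678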
MAGIC = '\320\317\021\340\241\261\032\341'
#
# --------------------------------------------------------------------
# property types
VT_EMPTY=0; VT_NULL=1; VT_I2=2; VT_I4=3; VT_R4=4; VT_R8=5; VT_CY=6;
VT_DATE=7; VT_BSTR=8; VT_DISPATCH=9; VT_ERROR=10; VT_BOOL=11;
VT_VARIANT=12; VT_UNKNOWN=13; VT_DECIMAL=14; VT_I1=16; VT_UI1=17;
VT_UI2=18; VT_UI4=19; VT_I8=20; VT_UI8=21; VT_INT=22; VT_UINT=23;
VT_VOID=24; VT_HRESULT=25; VT_PTR=26; VT_SAFEARRAY=27; VT_CARRAY=28;
VT_USERDEFINED=29; VT_LPSTR=30; VT_LPWSTR=31; VT_FILETIME=64;
VT_BLOB=65; VT_STREAM=66; VT_STORAGE=67; VT_STREAMED_OBJECT=68;
VT_STORED_OBJECT=69; VT_BLOB_OBJECT=70; VT_CF=71; VT_CLSID=72;
VT_VECTOR=0x1000;
# map property id to name (for debugging purposes)
VT = {}
for k, v in vars().items():
if k[:3] == "VT_":
VT[v] = k
#
# --------------------------------------------------------------------
# Some common document types (root.clsid fields)
WORD_CLSID = "00020900-0000-0000-C000-000000000046"
#
# --------------------------------------------------------------------
class _OleStream(StringIO.StringIO):
"""OLE2 Stream
Returns a read-only file object which can be used to read
the contents of an OLE stream. To open a stream, use the
openstream method in the OleFileIO class.
This function can be used with either ordinary streams,
or ministreams, depending on the offset, sectorsize, and
fat table arguments.
"""
def __init__(self, fp, sect, size, offset, sectorsize, fat):
data = []
while sect != -2: # 0xFFFFFFFEL:
fp.seek(offset + sectorsize * sect)
data.append(fp.read(sectorsize))
sect = fat[sect]
data = string.join(data, "")
# print len(data), size
StringIO.StringIO.__init__(self, data[:size])
#
# --------------------------------------------------------------------
class _OleDirectoryEntry:
"""OLE2 Directory Entry
Encapsulates a stream directory entry. Note that the
constructor builds a tree of all subentries, so we only
have to call it with the root object.
"""
def __init__(self, sidlist, sid):
# store directory parameters. the caller provides
# a complete list of directory entries, as read from
# the directory stream.
name, type, sect, size, sids, clsid = sidlist[sid]
self.sid = sid
self.name = name
self.type = type # 1=storage 2=stream
self.sect = sect
self.size = size
self.clsid = clsid
# process child nodes, if any
self.kids = []
sid = sidlist[sid][4][2]
if sid != -1:
# the directory entries are organized as a red-black tree.
# the following piece of code does an ordered traversal of
# such a tree (at least that's what I hope ;-)
stack = [self.sid]
# start at leftmost position
left, right, child = sidlist[sid][4]
while left != -1: # 0xFFFFFFFFL:
stack.append(sid)
sid = left
left, right, child = sidlist[sid][4]
while sid != self.sid:
self.kids.append(_OleDirectoryEntry(sidlist, sid))
# try to move right
left, right, child = sidlist[sid][4]
if right != -1: # 0xFFFFFFFFL:
# and then back to the left
sid = right
while 1:
left, right, child = sidlist[sid][4]
if left == -1: # 0xFFFFFFFFL:
break
stack.append(sid)
sid = left
else:
# couldn't move right; move up instead
while 1:
ptr = stack[-1]
del stack[-1]
left, right, child = sidlist[ptr][4]
if right != sid:
break
sid = right
left, right, child = sidlist[sid][4]
if right != ptr:
sid = ptr
# in the OLE file, entries are sorted on (length, name).
# for convenience, we sort them on name instead.
self.kids.sort()
def __cmp__(self, other):
"Compare entries by name"
return cmp(self.name, other.name)
def dump(self, tab = 0):
"Dump this entry, and all its subentries (for debug purposes only)"
TYPES = ["(invalid)", "(storage)", "(stream)", "(lockbytes)",
"(property)", "(root)"]
print " "*tab + repr(self.name), TYPES[self.type],
if self.type in (2, 5):
print self.size, "bytes",
print
if self.type in (1, 5) and self.clsid:
print " "*tab + "{%s}" % self.clsid
for kid in self.kids:
kid.dump(tab + 2)
#
# --------------------------------------------------------------------
##
# This class encapsulates the interface to an OLE 2 structured
# storage file. Use the {@link listdir} and {@link openstream}
# methods to access the contents of this file.
class OleFileIO:
"""OLE container object
This class encapsulates the interface to an OLE 2 structured
storage file. Use the listdir and openstream methods to access
the contents of this file.
Object names are given as a list of strings, one for each subentry
level. The root entry should be omitted. For example, the following
code extracts all image streams from a Microsoft Image Composer file:
ole = OleFileIO("fan.mic")
for entry in ole.listdir():
if entry[1:2] == "Image":
fin = ole.openstream(entry)
fout = open(entry[0:1], "wb")
while 1:
s = fin.read(8192)
if not s:
break
fout.write(s)
You can use the viewer application provided with the Python Imaging
Library to view the resulting files (which happens to be standard
TIFF files).
"""
def __init__(self, filename = None):
if filename:
self.open(filename)
##
# Open an OLE2 file.
def open(self, filename):
"""Open an OLE2 file"""
if type(filename) == type(""):
self.fp = open(filename, "rb")
else:
self.fp = filename
header = self.fp.read(512)
if len(header) != 512 or header[:8] != MAGIC:
raise IOError, "not an OLE2 structured storage file"
# file clsid (probably never used, so we don't store it)
clsid = self._clsid(header[8:24])
self.sectorsize = 1 << i16(header, 30)
self.minisectorsize = 1 << i16(header, 32)
self.minisectorcutoff = i32(header, 56)
# Load file allocation tables
self.loadfat(header)
# Load directory. This sets both the sidlist (ordered by id)
# and the root (ordered by hierarchy) members.
self.loaddirectory(i32(header, 48))
self.ministream = None
self.minifatsect = i32(header, 60)
def loadfat(self, header):
# Load the FAT table. The header contains the sector numbers
# for the first 109 FAT sectors. Additional sectors are
# described by DIF blocks
sect = header[76:512]
fat = []
for i in range(0, len(sect), 4):
ix = i32(sect, i)
if ix == -2 or ix == -1: # ix == 0xFFFFFFFEL or ix == 0xFFFFFFFFL:
break
s = self.getsect(ix)
fat = fat + map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
self.fat = fat
def loadminifat(self):
# Load the MINIFAT table. This is stored in a standard sub-
# stream, pointed to by a header field.
s = self._open(self.minifatsect).read()
self.minifat = map(lambda i, s=s: i32(s, i), range(0, len(s), 4))
def getsect(self, sect):
# Read given sector
self.fp.seek(512 + self.sectorsize * sect)
return self.fp.read(self.sectorsize)
def _unicode(self, s):
# Map unicode string to Latin 1
return filter(ord, s)
def loaddirectory(self, sect):
# Load the directory. The directory is stored in a standard
# substream, independent of its size.
# read directory stream
fp = self._open(sect)
# create list of sid entries
self.sidlist = []
while 1:
entry = fp.read(128)
if not entry:
break
type = ord(entry[66])
name = self._unicode(entry[0:0+i16(entry, 64)])
ptrs = i32(entry, 68), i32(entry, 72), i32(entry, 76)
sect, size = i32(entry, 116), i32(entry, 120)
clsid = self._clsid(entry[80:96])
self.sidlist.append((name, type, sect, size, ptrs, clsid))
# create hierarchical list of directory entries
self.root = _OleDirectoryEntry(self.sidlist, 0)
def dumpdirectory(self):
# Dump directory (for debugging only)
self.root.dump()
def _clsid(self, clsid):
if clsid == "\0" * len(clsid):
return ""
return (("%08X-%04X-%04X-%02X%02X-" + "%02X" * 6) %
((i32(clsid, 0), i16(clsid, 4), i16(clsid, 6)) +
tuple(map(ord, clsid[8:16]))))
def _list(self, files, prefix, node):
# listdir helper
prefix = prefix + [node.name]
for entry in node.kids:
if entry.kids:
self._list(files, prefix, entry)
else:
files.append(prefix[1:] + [entry.name])
def _find(self, filename):
# openstream helper
node = self.root
for name in filename:
for kid in node.kids:
if kid.name == name:
break
else:
raise IOError, "file not found"
node = kid
return node.sid
def _open(self, start, size = 0x7FFFFFFF):
# openstream helper.
if size < self.minisectorcutoff:
# ministream object
if not self.ministream:
self.loadminifat()
self.ministream = self._open(self.sidlist[0][2])
return _OleStream(self.ministream, start, size, 0,
self.minisectorsize, self.minifat)
# standard stream
return _OleStream(self.fp, start, size, 512,
self.sectorsize, self.fat)
##
# Returns a list of streams stored in this file.
def listdir(self):
"""Return a list of streams stored in this file"""
files = []
self._list(files, [], self.root)
return files
##
# Opens a stream as a read-only file object.
def openstream(self, filename):
"""Open a stream as a read-only file object"""
slot = self._find(filename)
name, type, sect, size, sids, clsid = self.sidlist[slot]
if type != 2:
raise IOError, "this file is not a stream"
return self._open(sect, size)
##
# Gets a list of properties described in substream.
def getproperties(self, filename):
"""Return properties described in substream"""
fp = self.openstream(filename)
data = {}
# header
s = fp.read(28)
clsid = self._clsid(s[8:24])
# format id
s = fp.read(20)
fmtid = self._clsid(s[:16])
fp.seek(i32(s, 16))
# get section
s = "****" + fp.read(i32(fp.read(4))-4)
for i in range(i32(s, 4)):
id = i32(s, 8+i*8)
offset = i32(s, 12+i*8)
type = i32(s, offset)
# test for common types first (should perhaps use
# a dictionary instead?)
if type == VT_I2:
value = i16(s, offset+4)
if value >= 32768:
value = value - 65536
elif type == VT_UI2:
value = i16(s, offset+4)
elif type in (VT_I4, VT_ERROR):
value = i32(s, offset+4)
elif type == VT_UI4:
value = i32(s, offset+4) # FIXME
elif type in (VT_BSTR, VT_LPSTR):
count = i32(s, offset+4)
value = s[offset+8:offset+8+count-1]
elif type == VT_BLOB:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
elif type == VT_LPWSTR:
count = i32(s, offset+4)
value = self._unicode(s[offset+8:offset+8+count*2])
elif type == VT_FILETIME:
value = long(i32(s, offset+4)) + (long(i32(s, offset+8))<<32)
value = value / 10000000L # seconds
elif type == VT_UI1:
value = ord(s[offset+4])
elif type == VT_CLSID:
value = self._clsid(s[offset+4:offset+20])
elif type == VT_CF:
count = i32(s, offset+4)
value = s[offset+8:offset+8+count]
else:
value = None # everything else yields "None"
#print "%08x" % id, repr(value),
#print "(%s)" % VT[i32(s, offset) & 0xFFF]
data[id] = value
return data
#
# --------------------------------------------------------------------
# This script can be used to dump the directory of any OLE2 structured
# storage file.
if __name__ == "__main__":
import sys
for file in sys.argv[1:]:
try:
ole = OleFileIO(file)
print "-" * 68
print file
print "-" * 68
ole.dumpdirectory()
for file in ole.listdir():
if file[-1][0] == "\005":
print file
props = ole.getproperties(file)
props = props.items()
props.sort()
for k, v in props:
print " ", k, v
except IOError, v:
print "***", "cannot read", file, "-", v
| gpl-2.0 |
jlmucb/cloudproxy | src/third_party/googlemock/gtest/test/gtest_break_on_failure_unittest.py | 2140 | 7339 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for Google Test's break-on-failure mode.
A user can ask Google Test to seg-fault when an assertion fails, using
either the GTEST_BREAK_ON_FAILURE environment variable or the
--gtest_break_on_failure flag. This script tests such functionality
by invoking gtest_break_on_failure_unittest_ (a program written with
Google Test) with different environments and command line flags.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import gtest_test_utils
import os
import sys
# Constants.
IS_WINDOWS = os.name == 'nt'
# The environment variable for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE'
# The command line flag for enabling/disabling the break-on-failure mode.
BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure'
# The environment variable for enabling/disabling the throw-on-failure mode.
THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE'
# The environment variable for enabling/disabling the catch-exceptions mode.
CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS'
# Path to the gtest_break_on_failure_unittest_ program.
EXE_PATH = gtest_test_utils.GetTestExecutablePath(
'gtest_break_on_failure_unittest_')
environ = gtest_test_utils.environ
SetEnvVar = gtest_test_utils.SetEnvVar
# Tests in this file run a Google-Test-based test program and expect it
# to terminate prematurely. Therefore they are incompatible with
# the premature-exit-file protocol by design. Unset the
# premature-exit filepath to prevent Google Test from creating
# the file.
SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None)
def Run(command):
"""Runs a command; returns 1 if it was killed by a signal, or 0 otherwise."""
p = gtest_test_utils.Subprocess(command, env=environ)
if p.terminated_by_signal:
return 1
else:
return 0
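# Hedged illustration, not part of the original test file: the two switches
# this suite exercises correspond to running the test binary as, e.g.,
#
#     GTEST_BREAK_ON_FAILURE=1 ./gtest_break_on_failure_unittest_
#     ./gtest_break_on_failure_unittest_ --gtest_break_on_failure
#
# RunAndVerify below drives the same settings through `environ` and the
# command line, then checks whether the child process was killed by a signal.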
# The tests.
class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable or
the --gtest_break_on_failure flag to turn assertion failures into
segmentation faults.
"""
def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault):
"""Runs gtest_break_on_failure_unittest_ and verifies that it does
(or does not) have a seg-fault.
Args:
env_var_value: value of the GTEST_BREAK_ON_FAILURE environment
variable; None if the variable should be unset.
flag_value: value of the --gtest_break_on_failure flag;
None if the flag should not be present.
expect_seg_fault: 1 if the program is expected to generate a seg-fault;
0 otherwise.
"""
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value)
if env_var_value is None:
env_var_value_msg = ' is not set'
else:
env_var_value_msg = '=' + env_var_value
if flag_value is None:
flag = ''
elif flag_value == '0':
flag = '--%s=0' % BREAK_ON_FAILURE_FLAG
else:
flag = '--%s' % BREAK_ON_FAILURE_FLAG
command = [EXE_PATH]
if flag:
command.append(flag)
if expect_seg_fault:
should_or_not = 'should'
else:
should_or_not = 'should not'
has_seg_fault = Run(command)
SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None)
msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' %
(BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command),
should_or_not))
self.assert_(has_seg_fault == expect_seg_fault, msg)
def testDefaultBehavior(self):
"""Tests the behavior of the default mode."""
self.RunAndVerify(env_var_value=None,
flag_value=None,
expect_seg_fault=0)
def testEnvVar(self):
"""Tests using the GTEST_BREAK_ON_FAILURE environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value=None,
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value=None,
expect_seg_fault=1)
def testFlag(self):
"""Tests using the --gtest_break_on_failure flag."""
self.RunAndVerify(env_var_value=None,
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
def testFlagOverridesEnvVar(self):
"""Tests that the flag overrides the environment variable."""
self.RunAndVerify(env_var_value='0',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='0',
flag_value='1',
expect_seg_fault=1)
self.RunAndVerify(env_var_value='1',
flag_value='0',
expect_seg_fault=0)
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
def testBreakOnFailureOverridesThrowOnFailure(self):
"""Tests that gtest_break_on_failure overrides gtest_throw_on_failure."""
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value=None,
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None)
if IS_WINDOWS:
def testCatchExceptionsDoesNotInterfere(self):
"""Tests that gtest_catch_exceptions doesn't interfere."""
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1')
try:
self.RunAndVerify(env_var_value='1',
flag_value='1',
expect_seg_fault=1)
finally:
SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None)
if __name__ == '__main__':
gtest_test_utils.Main()
| apache-2.0 |
igsr/igsr_analysis | PyHive/Factories/SplitVCFintoChros.py | 1 | 2227 | import eHive
import os
import sys
from VCF.VCFfilter.BCFTools import BCFTools
class SplitVCFintoChros(eHive.BaseRunnable):
"""Split a VCF into the chromosomes present in a Fasta index"""
def __str_to_bool(self, s):
if s == 'True':
return True
elif s == 'False':
return False
else:
raise ValueError # evil ValueError!
def run(self):
self.warning('Split file: %s' % self.param_required('filepath'))
ifile = self.param_required('filepath')
file = os.path.split(ifile)[1]
outdir = self.param_required('work_dir')
faix = self.param_required('faix')
verbose = None
if self.param_is_defined('verbose'):
verbose = self.__str_to_bool(self.param('verbose'))
filt_string = None
if self.param_is_defined('filt_string'):
filt_string = self.param('filt_string')
bcftools_o = BCFTools(vcf=ifile, bcftools_folder=self.param('bcftools_folder'))
files = []
ix = 1
for line in open(faix):
if line.startswith("\n"):
continue
chrom = line.split('\t')[0]
self.warning('Splitting %s'% chrom)
chr_folder = outdir+"/"+chrom
if not os.path.isdir(chr_folder):
os.mkdir(chr_folder)
vcffile = bcftools_o.subset_vcf(region=chrom, outprefix=file, outdir=chr_folder,
create_index=True, apply_filters=filt_string,
threads=self.param('threads'), action='include',
verbose=verbose)
files.append(
{
'chr': vcffile,
'ix': ix,
'chromname': chrom
}
)
ix += 1
self.param('files', files)
def write_output(self):
self.warning('{0} files have been created'.format(len(self.param('files'))))
if self.param('verbose') == "True":
for f in self.param('files'):
self.warning("Chr file is %s" % f)
self.dataflow(self.param('files'), 2)
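# Hedged note, not part of the original runnable: `faix` is expected to be a
# samtools-style .fai index, one tab-delimited line per sequence with the
# sequence name in the first column, e.g. (column values are illustrative)
#
#     chr20   63025520        224986603       60      61
#
# Only that first column (line.split('\t')[0]) is used above to drive the
# per-chromosome bcftools subsetting.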
| apache-2.0 |
gregn610/workalendar | workalendar/usa.py | 1 | 33148 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from datetime import date, timedelta
from workalendar.core import WesternCalendar, ChristianMixin, Calendar
from workalendar.core import SUN, MON, TUE, WED, THU, FRI, SAT
NONE, NEAREST_WEEKDAY, MONDAY = range(3)
class UnitedStates(WesternCalendar, ChristianMixin):
"United States of America"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(7, 4, 'Independence Day'),
(11, 11, 'Veterans Day'),
)
@staticmethod
def is_presidential_year(year):
return (year % 4) == 0
def get_variable_days(self, year):
# usual variable days
days = super(UnitedStates, self).get_variable_days(year)
days += [
(UnitedStates.get_nth_weekday_in_month(year, 1, MON, 3),
'Martin Luther King, Jr. Day'),
(UnitedStates.get_nth_weekday_in_month(year, 2, MON, 3),
"Washington's Birthday"),
(UnitedStates.get_last_weekday_in_month(year, 5, MON),
"Memorial Day"),
(UnitedStates.get_nth_weekday_in_month(year, 9, MON),
"Labor Day"),
(UnitedStates.get_nth_weekday_in_month(year, 10, MON, 2),
"Colombus Day"),
(UnitedStates.get_nth_weekday_in_month(year, 11, THU, 4),
"Thanksgiving Day"),
]
# Inauguration day
if UnitedStates.is_presidential_year(year - 1):
inauguration_day = date(year, 1, 20)
if inauguration_day.weekday() == SUN:
inauguration_day = date(year, 1, 21)
days.append((inauguration_day, "Inauguration Day"))
return days
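# Hedged worked example, not in the upstream workalendar source: 2012 is a
# presidential year (2012 % 4 == 0), so get_variable_days(2013) adds an
# Inauguration Day entry; January 20, 2013 fell on a Sunday, so the code above
# shifts it to January 21, 2013.
#
#     >>> UnitedStates.is_presidential_year(2012)
#     True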
class FloatToNearestWeekdayMixin(Calendar):
"Float Saturady to Friday, Sunday to Monday"
def float(self, holidays, year=0):
new_holidays = []
holiday_lookup = [x[0] for x in holidays]
for holiday in holidays:
if holiday[0].weekday() == SAT:
new_holidays.append((holiday[0] - timedelta(days=1),
holiday[1] + " (Observed)"))
elif holiday[0].weekday() == SUN:
new_holidays.append((holiday[0] + timedelta(days=1),
holiday[1] + " (Observed)"))
if year > 0 and date(year + 1, 1, 1).weekday() == SAT:
new_holidays.append((date(year, 12, 31,),
"New Years Day (Observed)"))
year = holiday_lookup[0].year
if (date(year, 12, 25) in holiday_lookup and
date(year, 12, 24) in holiday_lookup and
date(year, 12, 25).weekday() == SAT):
new_holidays.append((date(year, 12, 23),
"Christmas Eve (Observed)"))
if (date(year, 12, 25) in holiday_lookup and
date(year, 12, 24) in holiday_lookup and
date(year, 12, 25).weekday() == MON):
new_holidays.append((date(year, 12, 26),
"Christmas Eve (Observed)"))
new_holidays.append((date(year, 12, 27),
"Christmas Day (Observed)"))
return holidays + new_holidays
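# Hedged illustration of the floating rule above, not in the upstream source:
# July 4, 2015 was a Saturday, so float() also yields July 3, 2015 as
# "Independence Day (Observed)"; July 4, 2021 was a Sunday, so July 5, 2021
# is added instead.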
class WashingtonsBirthdayInDecemberMixin(Calendar):
"""Floating observance, to give long weekend at christmas"""
def get_washington_birthday(self, year,
label="Washington's Birthday (Observed)"):
christmas_day = date(year, 12, 25).weekday()
if christmas_day == MON:
day = (date(year, 12, 26), label) # TUE
elif christmas_day == TUE:
day = (date(year, 12, 24), label) # MON
elif christmas_day == WED:
day = (date(year, 12, 24), label) # TUE
elif christmas_day == THU:
day = (date(year, 12, 26), label) # FRI
elif christmas_day == FRI:
day = (date(year, 12, 24), label) # THU
elif christmas_day == SAT:
day = (date(year, 12, 23), label) # THU
else: # christmas_day == SUN:
day = (date(year, 12, 23), label) # FRI
return day
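# Hedged worked example, not in the upstream source: Christmas 2017 fell on a
# Monday, so get_washington_birthday(2017) returns December 26, 2017; Christmas
# 2018 fell on a Tuesday, so the observance moves to December 24, 2018.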
class CesarChavezDayMixin(Calendar):
"""31st of March, float to 1st April if Monday"""
def get_chavez_day(self, year):
days = [(date(year, 3, 31), "Cesar Chavez Day")]
if date(year, 3, 31).weekday() == SUN:
days += [(date(year, 4, 1), "Cesar Chavez Day (Observed)")]
return days
class MardiGrasMixin(ChristianMixin):
"""Tuesday before Ash Wednesday"""
def get_mardis_gras(self, year):
sunday = self.get_easter_sunday(year)
return (sunday - timedelta(days=47), "Mardi Gras")
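# Hedged worked example, not in the upstream source: Easter Sunday 2017 was
# April 16, so get_mardis_gras(2017) returns April 16 - 47 days =
# February 28, 2017, the Tuesday before Ash Wednesday (March 1, 2017).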
class DayAfterChristmasNoFloatMixin(Calendar):
"""26th of December - but doesn't ever float"""
def get_day_after_christmas(self, year):
days = (date(year, 12, 26), "Day After Christmas")
return days
class PatriotsDayMixin(Calendar):
"""3rd Monday of April"""
def get_patriots_day(self, year):
days = (self.get_nth_weekday_in_month(year, 4, MON, 3),
"Patriots Day")
return days
class ConfederateMemorialDayMixin(Calendar):
"""4th Monday of April"""
def get_confederate_day(self, year):
return (Alabama.get_nth_weekday_in_month(year, 4, MON, 4),
"Confederate Day")
class ThanksgivingFridayMixin(Calendar):
"4th Friday in November"
def get_thanksgiving_friday(self, year, label="Thanksgiving Friday"):
return (self.get_nth_weekday_in_month(year, 11, FRI, 4), label)
class Alabama(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ConfederateMemorialDayMixin):
"Alabama"
def get_variable_days(self, year):
days = super(Alabama, self).get_variable_days(year)
days = super(Alabama, self).float(days)
days = days + [self.get_confederate_day(year)]
days = days + [(Alabama.get_nth_weekday_in_month(year, 6, MON, 1),
"Jefferson Davis Birthday")]
return days
def get_fixed_holidays(self, year):
days = super(Alabama, self).get_fixed_holidays(year)
days = super(Alabama, self).float(days, year)
return days
class Alaska(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Alaska"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(10, 18, 'Alaska Day'),
)
def get_variable_days(self, year):
days = super(Alaska, self).get_variable_days(year)
days = super(Alaska, self).float(days)
days = days + [(Alabama.get_last_weekday_in_month(year, 3, MON),
"Seward's Day")]
return days
def get_fixed_holidays(self, year):
days = super(Alaska, self).get_fixed_holidays(year)
days = super(Alaska, self).float(days, year)
return days
class Arizona(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Arizona"""
def get_variable_days(self, year):
days = super(Arizona, self).get_variable_days(year)
days = super(Arizona, self).float(days)
return days
def get_fixed_holidays(self, year):
days = super(Arizona, self).get_fixed_holidays(year)
days = super(Arizona, self).float(days, year)
return days
class Arkansas(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Arkansas"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Arkansas, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Arkansas, self).get_fixed_holidays(year)
days = super(Arkansas, self).float(days, year)
return days
class California(UnitedStates, WesternCalendar, ThanksgivingFridayMixin,
FloatToNearestWeekdayMixin, CesarChavezDayMixin):
"""California"""
def get_variable_days(self, year):
days = super(California, self).get_variable_days(year)
days += [self.get_thanksgiving_friday(year)]
days += [(self.get_nth_weekday_in_month(year, 10, MON, 2),
"Indingenous People's Day")]
days += self.get_chavez_day(year)
return days
def get_fixed_holidays(self, year):
days = super(California, self).get_fixed_holidays(year)
days = super(California, self).float(days, year)
return days
class Colorado(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
CesarChavezDayMixin):
"""Colorado"""
def get_variable_days(self, year):
days = super(Colorado, self).get_variable_days(year)
days += self.get_chavez_day(year)
return days
def get_fixed_holidays(self, year):
days = super(Colorado, self).get_fixed_holidays(year)
days = super(Colorado, self).float(days, year)
return days
class Connecticut(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Connecticut"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(2, 12, "Lincoln's Birthday"),
)
include_good_friday = True
def get_variable_days(self, year):
days = super(Connecticut, self).get_variable_days(year)
days = super(Connecticut, self).float(days)
return days
def get_fixed_holidays(self, year):
days = super(Connecticut, self).get_fixed_holidays(year)
days = super(Connecticut, self).float(days, year)
return days
class Delaware(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Delaware"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Delaware, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Delaware, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Florida(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Florida"""
def get_variable_days(self, year):
days = super(Florida, self).get_variable_days(year)
days = super(Florida, self).float(days)
days = days + [(Florida.get_nth_weekday_in_month(year, 11, FRI, 4),
"Friday after Thanksgiving")]
return days
def get_fixed_holidays(self, year):
days = super(Florida, self).get_fixed_holidays(year)
days = super(Florida, self).float(days, year)
return days
class Georgia(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
WashingtonsBirthdayInDecemberMixin, ConfederateMemorialDayMixin):
"""Georgia"""
def get_variable_days(self, year):
days = super(Georgia, self).get_variable_days(year)
days = super(Georgia, self).float(days)
days += [self.get_confederate_day(year)]
days += [(Georgia.get_nth_weekday_in_month(year, 11, FRI, 4),
"Robert E. Lee's Birthday (Observed)")]
days += [self.get_washington_birthday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Georgia, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Hawaii(UnitedStates, WesternCalendar):
"""Hawaii"""
pass
class Idaho(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Idaho"""
def get_variable_days(self, year):
days = super(Idaho, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Idaho, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Illinois(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Illinois"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(2, 12, "Lincoln's Birthday"),
)
def get_variable_days(self, year):
days = super(Illinois, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Illinois, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Indiana(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
WashingtonsBirthdayInDecemberMixin, ThanksgivingFridayMixin):
"""Indiana"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Indiana, self).get_variable_days(year)
days = self.float(days)
days += [self.get_washington_birthday(year)]
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Indiana, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Iowa(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Iowa"""
def get_variable_days(self, year):
days = super(Iowa, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Iowa, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Kansas(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, DayAfterChristmasNoFloatMixin):
"""Kansas"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Kansas, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [self.get_day_after_christmas(year)]
return days
def get_fixed_holidays(self, year):
days = super(Kansas, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Kentucky(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, DayAfterChristmasNoFloatMixin):
"""Kentucky"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Kentucky, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [self.get_day_after_christmas(year)]
return days
def get_fixed_holidays(self, year):
days = super(Kentucky, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Louisiana(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
MardiGrasMixin):
"""Louisiana"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Louisiana, self).get_variable_days(year)
days = self.float(days)
days += [self.get_mardis_gras(year)]
return days
def get_fixed_holidays(self, year):
days = super(Louisiana, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Maine(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
PatriotsDayMixin, ThanksgivingFridayMixin):
"""Maine"""
def get_variable_days(self, year):
days = super(Maine, self).get_variable_days(year)
days = self.float(days)
days += [self.get_patriots_day(year)]
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Maine, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Maryland(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Maryland"""
def get_variable_days(self, year):
days = super(Maryland, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Maryland, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Massachusetts(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
PatriotsDayMixin):
"""Massachusetts"""
def get_variable_days(self, year):
days = super(Massachusetts, self).get_variable_days(year)
days = self.float(days)
days += [self.get_patriots_day(year)]
return days
def get_fixed_holidays(self, year):
days = super(Massachusetts, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Michigan(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Michigan"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Michigan, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Michigan, self).get_fixed_holidays(year)
days += [(date(year, 12, 31), "New Years Eve")]
days = self.float(days, year)
return days
class Minnesota(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Minnesota"""
def get_variable_days(self, year):
days = super(Minnesota, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Minnesota, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Mississippi(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, ConfederateMemorialDayMixin):
"""Mississippi"""
def get_variable_days(self, year):
days = super(Mississippi, self).get_variable_days(year)
days = self.float(days)
days += [self.get_confederate_day(year)]
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Mississippi, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Missouri(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Missouri"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(2, 12, "Lincoln's Birthday"),
(5, 8, "Truman Day"),
)
def get_variable_days(self, year):
days = super(Missouri, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Missouri, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Montana(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Montana"""
def get_variable_days(self, year):
days = super(Montana, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Montana, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Nebraska(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Nebraska"""
def get_variable_days(self, year):
days = super(Nebraska, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [(self.get_last_weekday_in_month(year, 4, FRI),
"Arbor Day")]
return days
def get_fixed_holidays(self, year):
days = super(Nebraska, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Nevada(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Nevada"""
def get_variable_days(self, year):
days = super(Nevada, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [(self.get_last_weekday_in_month(year, 10, FRI),
"Nevada Day")]
return days
def get_fixed_holidays(self, year):
days = super(Nevada, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NewHampshire(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""NewHampshire"""
def get_variable_days(self, year):
days = super(NewHampshire, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(NewHampshire, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NewJersey(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""NewJersey"""
include_good_friday = True
def get_variable_days(self, year):
days = super(NewJersey, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(NewJersey, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NewMexico(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""NewMexico"""
def get_variable_days(self, year):
days = super(NewMexico, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(NewMexico, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NewYork(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""NewYork"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(2, 12, "Lincoln's Birthday"),
)
def get_variable_days(self, year):
days = super(NewYork, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(NewYork, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NorthCarolina(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, DayAfterChristmasNoFloatMixin):
"""NorthCarolina"""
include_good_friday = True
include_christmas_eve = True
def get_variable_days(self, year):
days = super(NorthCarolina, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [self.get_day_after_christmas(year)]
return days
def get_fixed_holidays(self, year):
days = super(NorthCarolina, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class NorthDakota(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""NorthDakota"""
include_good_friday = True
def get_variable_days(self, year):
days = super(NorthDakota, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(NorthDakota, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Ohio(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Ohio"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(12, 1, "Rosa Parks Day"),
)
def get_variable_days(self, year):
days = super(Ohio, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Ohio, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Oklahoma(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Oklahoma"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Oklahoma, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Oklahoma, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Oregon(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Oregon"""
def get_variable_days(self, year):
days = super(Oregon, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Oregon, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Pennsylvania(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Pennsylvania"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Pennsylvania, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Pennsylvania, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class RhodeIsland(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""RhodeIsland"""
def get_variable_days(self, year):
days = super(RhodeIsland, self).get_variable_days(year)
days = self.float(days)
days += [(self.get_nth_weekday_in_month(year, 8, MON, 2),
"Victory Day")]
return days
def get_fixed_holidays(self, year):
days = super(RhodeIsland, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class SouthCarolina(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, DayAfterChristmasNoFloatMixin):
"""SouthCarolina"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(5, 10, "Confederate Memorial Day"),
)
include_good_friday = True
include_christmas_eve = True
def get_variable_days(self, year):
days = super(SouthCarolina, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [self.get_day_after_christmas(year)]
return days
def get_fixed_holidays(self, year):
days = super(SouthCarolina, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class SouthDakota(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""SouthDakota"""
def get_variable_days(self, year):
days = super(SouthDakota, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(SouthDakota, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Tennessee(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Tennessee"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Tennessee, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Tennessee, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Texas(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
CesarChavezDayMixin):
"""Texas"""
include_good_friday = True
def get_variable_days(self, year):
days = super(Texas, self).get_variable_days(year)
days = self.float(days)
days += self.get_chavez_day(year)
return days
def get_fixed_holidays(self, year):
days = super(Texas, self).get_fixed_holidays(year)
days = self.float(days, year)
days += [(date(year, 1, 19), "Confederate Heroes Day")]
days += [(date(year, 3, 2), "Texas Independence Day")]
days += [(date(year, 4, 21), "San Jacinto Day")]
days += [(date(year, 6, 19), "Emancipation Day")]
        days += [(date(year, 8, 27), "Lyndon B. Johnson Day")]
return days
class Utah(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Utah"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(7, 24, "Pioneer Day"),
)
def get_variable_days(self, year):
days = super(Utah, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Utah, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Vermont(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Vermont"""
FIXED_HOLIDAYS = UnitedStates.FIXED_HOLIDAYS + (
(8, 16, "Bennington Battle Day"),
)
def get_variable_days(self, year):
days = super(Vermont, self).get_variable_days(year)
days = self.float(days)
days += [(self.get_nth_weekday_in_month(year, 3, TUE, 1),
"Town Meeting Day")]
return days
def get_fixed_holidays(self, year):
days = super(Vermont, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class Virginia(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin, DayAfterChristmasNoFloatMixin):
"""Virginia"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Virginia, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
days += [(self.get_nth_weekday_in_month(year, 1, FRI, 3),
"Lee-Jackson Day")]
days += [(self.get_nth_weekday_in_month(year, 11, WED, 4),
"Additional Thanksgiving Holiday")]
return days
def get_fixed_holidays(self, year):
days = super(Virginia, self).get_fixed_holidays(year)
days = self.float(days, year)
days += [self.get_day_after_christmas(year)]
return days
class Washington(UnitedStates, WesternCalendar, ThanksgivingFridayMixin,
FloatToNearestWeekdayMixin):
"""Washington"""
def get_variable_days(self, year):
days = super(Washington, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Washington, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
class WestVirginia(UnitedStates, WesternCalendar, ThanksgivingFridayMixin,
FloatToNearestWeekdayMixin):
"""WestVirginia"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(WestVirginia, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
        days += [(date(year, 6, 20), "West Virginia Day")]
if date(year, 6, 20).weekday() == SUN:
            days += [(date(year, 6, 21), "West Virginia Day (Observed)")]
return days
def get_fixed_holidays(self, year):
days = super(WestVirginia, self).get_fixed_holidays(year)
days = self.float(days, year)
days += [(date(year, 12, 31), "New Years Eve")]
return days
class Wisconsin(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin,
ThanksgivingFridayMixin):
"""Wisconsin"""
include_christmas_eve = True
def get_variable_days(self, year):
days = super(Wisconsin, self).get_variable_days(year)
days = self.float(days)
days += [self.get_thanksgiving_friday(year)]
return days
def get_fixed_holidays(self, year):
days = super(Wisconsin, self).get_fixed_holidays(year)
days += [(date(year, 12, 31), "New Years Eve")]
days = self.float(days, year)
return days
class Wyoming(UnitedStates, WesternCalendar, FloatToNearestWeekdayMixin):
"""Wyoming"""
def get_variable_days(self, year):
days = super(Wyoming, self).get_variable_days(year)
days = self.float(days)
return days
def get_fixed_holidays(self, year):
days = super(Wyoming, self).get_fixed_holidays(year)
days = self.float(days, year)
return days
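# --- Hedged usage sketch (not part of the upstream module) ---
# Illustrates how one of the state calendars defined above might be queried,
# assuming the workalendar-style API (``holidays(year)`` and
# ``is_working_day(day)``) inherited from ``WesternCalendar``.
if __name__ == '__main__':
    cal = Texas()
    # Print every fixed and variable holiday computed for a given year.
    for day, label in sorted(cal.holidays(2015)):
        print("%s  %s" % (day.isoformat(), label))
    # Check whether a specific date counts as a working day in Texas.
    print(cal.is_working_day(date(2015, 3, 2)))  # Texas Independence Day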
| mit |
ccmbioinfo/mugqic_pipelines | bfx/differential_expression.py | 1 | 3243 | #!/usr/bin/env python
################################################################################
# Copyright (C) 2014, 2015 GenAP, McGill University and Genome Quebec Innovation Centre
#
# This file is part of MUGQIC Pipelines.
#
# MUGQIC Pipelines is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# MUGQIC Pipelines is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with MUGQIC Pipelines. If not, see <http://www.gnu.org/licenses/>.
################################################################################
# Python Standard Modules
import os
# MUGQIC Modules
from core.config import *
from core.job import *
def deseq(
design_file,
count_matrix,
output_dir
):
return Job(
[count_matrix],
[os.path.join(output_dir, "deseq_results.csv"), os.path.join(output_dir, "dge_results.csv")],
[
['differential_expression_deseq', 'module_mugqic_tools'],
['differential_expression_deseq', 'module_R']
],
command="""\
Rscript $R_TOOLS/deseq.R \\
-d {design_file} \\
-c {count_matrix} \\
-o {output_dir}""".format(
design_file=design_file,
count_matrix=count_matrix,
output_dir=output_dir
))
def edger(
design_file,
count_matrix,
output_dir
):
return Job(
[count_matrix],
[os.path.join(output_dir, "edger_results.csv")],
[
['differential_expression_edger', 'module_mugqic_tools'],
['differential_expression_edger', 'module_R']
],
command="""\
Rscript $R_TOOLS/edger.R \\
-d {design_file} \\
-c {count_matrix} \\
-o {output_dir}""".format(
design_file=design_file,
count_matrix=count_matrix,
output_dir=output_dir
))
def goseq(
input_file,
input_columns,
output_file,
gene_size_file=None,
gene_ontology_file=None,
):
return Job(
[input_file],
[output_file],
[
['differential_expression_goseq', 'module_mugqic_tools'],
['differential_expression_goseq', 'module_R']
],
command="""\
Rscript $R_TOOLS/goseq.R {other_options} \\
-a {gene_size_file} \\
-G {gene_ontology_file} \\
-d {input_file} \\
-c {input_columns} \\
-o {output_file}""".format(
other_options=config.param('differential_expression_goseq','other_options'),
gene_size_file=gene_size_file if gene_size_file else config.param('differential_expression_goseq', 'gene_size', type='filepath'),
gene_ontology_file=gene_ontology_file if gene_ontology_file else config.param('differential_expression_goseq', 'gene_ontology', type='filepath'),
input_file=input_file,
input_columns=input_columns,
output_file=output_file
))
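# --- Hedged usage sketch (not part of the upstream module) ---
# Each helper above only builds a ``Job`` (inputs, outputs, required modules
# and the Rscript command line); the pipeline engine decides when to run it.
# The file names below are placeholders, and ``job.command`` is assumed to be
# the attribute where ``Job`` keeps the rendered command string.
if __name__ == '__main__':
    job = deseq(
        design_file="design.tsv",       # sample/condition design matrix
        count_matrix="raw_counts.csv",  # gene x sample count matrix
        output_dir="DGE"
    )
    print(job.command)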
| lgpl-3.0 |
dennis-sheil/commandergenius | project/jni/python/src/Lib/email/quoprimime.py | 54 | 10839 | # Copyright (C) 2001-2006 Python Software Foundation
# Author: Ben Gertzfield
# Contact: [email protected]
"""Quoted-printable content transfer encoding per RFCs 2045-2047.
This module handles the content transfer encoding method defined in RFC 2045
to encode US ASCII-like 8-bit data called `quoted-printable'. It is used to
safely encode text that is in a character set similar to the 7-bit US ASCII
character set, but that includes some 8-bit characters that are normally not
allowed in email bodies or headers.
Quoted-printable is very space-inefficient for encoding binary files; use the
email.base64MIME module for that instead.
This module provides an interface to encode and decode both headers and bodies
with quoted-printable encoding.
RFC 2045 defines a method for including character set information in an
`encoded-word' in a header. This method is commonly used for 8-bit real names
in To:/From:/Cc: etc. fields, as well as Subject: lines.
This module does not do the line wrapping or end-of-line character
conversion necessary for proper internationalized headers; it only
does dumb encoding and decoding. To deal with the various line
wrapping issues, use the email.Header module.
"""
__all__ = [
'body_decode',
'body_encode',
'body_quopri_check',
'body_quopri_len',
'decode',
'decodestring',
'encode',
'encodestring',
'header_decode',
'header_encode',
'header_quopri_check',
'header_quopri_len',
'quote',
'unquote',
]
import re
from string import hexdigits
from email.utils import fix_eols
CRLF = '\r\n'
NL = '\n'
# See also Charset.py
MISC_LEN = 7
hqre = re.compile(r'[^-a-zA-Z0-9!*+/ ]')
bqre = re.compile(r'[^ !-<>-~\t]')
# Helpers
def header_quopri_check(c):
"""Return True if the character should be escaped with header quopri."""
return bool(hqre.match(c))
def body_quopri_check(c):
"""Return True if the character should be escaped with body quopri."""
return bool(bqre.match(c))
def header_quopri_len(s):
"""Return the length of str when it is encoded with header quopri."""
count = 0
for c in s:
if hqre.match(c):
count += 3
else:
count += 1
return count
def body_quopri_len(str):
"""Return the length of str when it is encoded with body quopri."""
count = 0
for c in str:
if bqre.match(c):
count += 3
else:
count += 1
return count
def _max_append(L, s, maxlen, extra=''):
if not L:
L.append(s.lstrip())
elif len(L[-1]) + len(s) <= maxlen:
L[-1] += extra + s
else:
L.append(s.lstrip())
def unquote(s):
"""Turn a string in the form =AB to the ASCII character with value 0xab"""
return chr(int(s[1:3], 16))
def quote(c):
return "=%02X" % ord(c)
def header_encode(header, charset="iso-8859-1", keep_eols=False,
maxlinelen=76, eol=NL):
"""Encode a single header line with quoted-printable (like) encoding.
Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
used specifically for email header fields to allow charsets with mostly 7
bit characters (and some 8 bit) to remain more or less readable in non-RFC
2045 aware mail clients.
charset names the character set to use to encode the header. It defaults
to iso-8859-1.
The resulting string will be in the form:
"=?charset?q?I_f=E2rt_in_your_g=E8n=E8ral_dire=E7tion?\\n
=?charset?q?Silly_=C8nglish_Kn=EEghts?="
with each line wrapped safely at, at most, maxlinelen characters (defaults
to 76 characters). If maxlinelen is None, the entire string is encoded in
one chunk with no splitting.
End-of-line characters (\\r, \\n, \\r\\n) will be automatically converted
to the canonical email line separator \\r\\n unless the keep_eols
parameter is True (the default is False).
Each line of the header will be terminated in the value of eol, which
defaults to "\\n". Set this to "\\r\\n" if you are using the result of
this function directly in email.
"""
# Return empty headers unchanged
if not header:
return header
if not keep_eols:
header = fix_eols(header)
# Quopri encode each line, in encoded chunks no greater than maxlinelen in
# length, after the RFC chrome is added in.
quoted = []
if maxlinelen is None:
# An obnoxiously large number that's good enough
max_encoded = 100000
else:
max_encoded = maxlinelen - len(charset) - MISC_LEN - 1
for c in header:
# Space may be represented as _ instead of =20 for readability
if c == ' ':
_max_append(quoted, '_', max_encoded)
# These characters can be included verbatim
elif not hqre.match(c):
_max_append(quoted, c, max_encoded)
# Otherwise, replace with hex value like =E2
else:
_max_append(quoted, "=%02X" % ord(c), max_encoded)
# Now add the RFC chrome to each encoded chunk and glue the chunks
# together. BAW: should we be able to specify the leading whitespace in
# the joiner?
joiner = eol + ' '
return joiner.join(['=?%s?q?%s?=' % (charset, line) for line in quoted])
def encode(body, binary=False, maxlinelen=76, eol=NL):
"""Encode with quoted-printable, wrapping at maxlinelen characters.
If binary is False (the default), end-of-line characters will be converted
to the canonical email end-of-line sequence \\r\\n. Otherwise they will
be left verbatim.
Each line of encoded text will end with eol, which defaults to "\\n". Set
this to "\\r\\n" if you will be using the result of this function directly
in an email.
Each line will be wrapped at, at most, maxlinelen characters (defaults to
76 characters). Long lines will have the `soft linefeed' quoted-printable
character "=" appended to them, so the decoded text will be identical to
the original text.
"""
if not body:
return body
if not binary:
body = fix_eols(body)
# BAW: We're accumulating the body text by string concatenation. That
# can't be very efficient, but I don't have time now to rewrite it. It
# just feels like this algorithm could be more efficient.
encoded_body = ''
lineno = -1
    # Preserve line endings here so we can check later to see if an eol needs
    # to be added to the output.
lines = body.splitlines(1)
for line in lines:
# But strip off line-endings for processing this line.
if line.endswith(CRLF):
line = line[:-2]
elif line[-1] in CRLF:
line = line[:-1]
lineno += 1
encoded_line = ''
prev = None
linelen = len(line)
# Now we need to examine every character to see if it needs to be
# quopri encoded. BAW: again, string concatenation is inefficient.
for j in range(linelen):
c = line[j]
prev = c
if bqre.match(c):
c = quote(c)
elif j+1 == linelen:
# Check for whitespace at end of line; special case
if c not in ' \t':
encoded_line += c
prev = c
continue
            # Check to see if the line has reached its maximum length
if len(encoded_line) + len(c) >= maxlinelen:
encoded_body += encoded_line + '=' + eol
encoded_line = ''
encoded_line += c
# Now at end of line..
if prev and prev in ' \t':
# Special case for whitespace at end of file
if lineno + 1 == len(lines):
prev = quote(prev)
if len(encoded_line) + len(prev) > maxlinelen:
encoded_body += encoded_line + '=' + eol + prev
else:
encoded_body += encoded_line + prev
# Just normal whitespace at end of line
else:
encoded_body += encoded_line + prev + '=' + eol
encoded_line = ''
        # Now look at the line we just finished and if it has a line ending,
        # we need to add eol to the end of the line.
if lines[lineno].endswith(CRLF) or lines[lineno][-1] in CRLF:
encoded_body += encoded_line + eol
else:
encoded_body += encoded_line
encoded_line = ''
return encoded_body
# For convenience and backwards compatibility w/ standard base64 module
body_encode = encode
encodestring = encode
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
"""Decode a quoted-printable string.
Lines are separated with eol, which defaults to \\n.
"""
if not encoded:
return encoded
# BAW: see comment in encode() above. Again, we're building up the
# decoded string with string concatenation, which could be done much more
# efficiently.
decoded = ''
for line in encoded.splitlines():
line = line.rstrip()
if not line:
decoded += eol
continue
i = 0
n = len(line)
while i < n:
c = line[i]
if c != '=':
decoded += c
i += 1
# Otherwise, c == "=". Are we at the end of the line? If so, add
# a soft line break.
elif i+1 == n:
i += 1
continue
# Decode if in form =AB
elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
decoded += unquote(line[i:i+3])
i += 3
# Otherwise, not in form =AB, pass literally
else:
decoded += c
i += 1
if i == n:
decoded += eol
# Special case if original string did not end with eol
if not encoded.endswith(eol) and decoded.endswith(eol):
decoded = decoded[:-1]
return decoded
# For convenience and backwards compatibility w/ standard base64 module
body_decode = decode
decodestring = decode
def _unquote_match(match):
"""Turn a match in the form =AB to the ASCII character with value 0xab"""
s = match.group(0)
return unquote(s)
# Header decoding is done a bit differently
def header_decode(s):
"""Decode a string encoded with RFC 2045 MIME header `Q' encoding.
This function does not parse a full MIME header value encoded with
quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
the high level email.Header class for that functionality.
"""
s = s.replace('_', ' ')
return re.sub(r'=\w{2}', _unquote_match, s)
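# --- Hedged usage sketch (appended for illustration; not part of the module) ---
# Round-trips an 8-bit body and encodes a header with the helpers above.
# The output shown in comments is indicative only.
if __name__ == '__main__':
    body = 'caf\xe9 au lait\n'
    encoded = encode(body)          # non-ASCII bytes become =XX escapes
    print(repr(encoded))
    print(repr(decode(encoded)))    # decodes back to the original text
    print(header_encode('Th\xe9r\xe8se', charset='iso-8859-1'))
    # e.g. '=?iso-8859-1?q?Th=E9r=E8se?='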
| lgpl-2.1 |
cruzegoodin/TSC-ShippingDetails | venv/lib/python2.7/site-packages/pip/baseparser.py | 149 | 9643 | """Base option parser setup"""
from __future__ import absolute_import
import sys
import optparse
import os
import re
import textwrap
from distutils.util import strtobool
from pip._vendor.six import string_types
from pip._vendor.six.moves import configparser
from pip.locations import (
legacy_config_file, config_basename, running_under_virtualenv,
site_config_files
)
from pip.utils import appdirs, get_terminal_size
_environ_prefix_re = re.compile(r"^PIP_", re.I)
class PrettyHelpFormatter(optparse.IndentedHelpFormatter):
"""A prettier/less verbose help formatter for optparse."""
def __init__(self, *args, **kwargs):
# help position must be aligned with __init__.parseopts.description
kwargs['max_help_position'] = 30
kwargs['indent_increment'] = 1
kwargs['width'] = get_terminal_size()[0] - 2
optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
def format_option_strings(self, option):
return self._format_option_strings(option, ' <%s>', ', ')
def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '):
"""
Return a comma-separated list of option strings and metavars.
:param option: tuple of (short opt, long opt), e.g: ('-f', '--format')
:param mvarfmt: metavar format string - evaluated as mvarfmt % metavar
:param optsep: separator
"""
opts = []
if option._short_opts:
opts.append(option._short_opts[0])
if option._long_opts:
opts.append(option._long_opts[0])
if len(opts) > 1:
opts.insert(1, optsep)
if option.takes_value():
metavar = option.metavar or option.dest.lower()
opts.append(mvarfmt % metavar.lower())
return ''.join(opts)
def format_heading(self, heading):
if heading == 'Options':
return ''
return heading + ':\n'
def format_usage(self, usage):
"""
Ensure there is only one newline between usage and the first heading
if there is no description.
"""
msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ")
return msg
def format_description(self, description):
# leave full control over description to us
if description:
if hasattr(self.parser, 'main'):
label = 'Commands'
else:
label = 'Description'
# some doc strings have initial newlines, some don't
description = description.lstrip('\n')
# some doc strings have final newlines and spaces, some don't
description = description.rstrip()
# dedent, then reindent
description = self.indent_lines(textwrap.dedent(description), " ")
description = '%s:\n%s\n' % (label, description)
return description
else:
return ''
def format_epilog(self, epilog):
# leave full control over epilog to us
if epilog:
return epilog
else:
return ''
def indent_lines(self, text, indent):
new_lines = [indent + line for line in text.split('\n')]
return "\n".join(new_lines)
class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter):
"""Custom help formatter for use in ConfigOptionParser that updates
the defaults before expanding them, allowing them to show up correctly
in the help listing"""
def expand_default(self, option):
if self.parser is not None:
self.parser.update_defaults(self.parser.defaults)
return optparse.IndentedHelpFormatter.expand_default(self, option)
class CustomOptionParser(optparse.OptionParser):
def insert_option_group(self, idx, *args, **kwargs):
"""Insert an OptionGroup at a given position."""
group = self.add_option_group(*args, **kwargs)
self.option_groups.pop()
self.option_groups.insert(idx, group)
return group
@property
def option_list_all(self):
"""Get a list of all options, including those in option groups."""
res = self.option_list[:]
for i in self.option_groups:
res.extend(i.option_list)
return res
class ConfigOptionParser(CustomOptionParser):
"""Custom option parser which updates its defaults by checking the
configuration files and environmental variables"""
isolated = False
def __init__(self, *args, **kwargs):
self.config = configparser.RawConfigParser()
self.name = kwargs.pop('name')
self.isolated = kwargs.pop("isolated", False)
self.files = self.get_config_files()
if self.files:
self.config.read(self.files)
assert self.name
optparse.OptionParser.__init__(self, *args, **kwargs)
def get_config_files(self):
# the files returned by this method will be parsed in order with the
# first files listed being overridden by later files in standard
# ConfigParser fashion
config_file = os.environ.get('PIP_CONFIG_FILE', False)
if config_file == os.devnull:
return []
# at the base we have any site-wide configuration
files = list(site_config_files)
# per-user configuration next
if not self.isolated:
if config_file and os.path.exists(config_file):
files.append(config_file)
else:
# This is the legacy config file, we consider it to be a lower
# priority than the new file location.
files.append(legacy_config_file)
# This is the new config file, we consider it to be a higher
# priority than the legacy file.
files.append(
os.path.join(
appdirs.user_config_dir("pip"),
config_basename,
)
)
# finally virtualenv configuration first trumping others
if running_under_virtualenv():
venv_config_file = os.path.join(
sys.prefix,
config_basename,
)
if os.path.exists(venv_config_file):
files.append(venv_config_file)
return files
def check_default(self, option, key, val):
try:
return option.check_value(key, val)
except optparse.OptionValueError as exc:
print("An error occurred during configuration: %s" % exc)
sys.exit(3)
def update_defaults(self, defaults):
"""Updates the given defaults with values from the config files and
the environ. Does a little special handling for certain types of
options (lists)."""
# Then go and look for the other sources of configuration:
config = {}
# 1. config files
for section in ('global', self.name):
config.update(
self.normalize_keys(self.get_config_section(section))
)
# 2. environmental variables
if not self.isolated:
config.update(self.normalize_keys(self.get_environ_vars()))
# Then set the options with those values
for key, val in config.items():
option = self.get_option(key)
if option is not None:
# ignore empty values
if not val:
continue
if option.action in ('store_true', 'store_false', 'count'):
val = strtobool(val)
if option.action == 'append':
val = val.split()
val = [self.check_default(option, key, v) for v in val]
else:
val = self.check_default(option, key, val)
defaults[option.dest] = val
return defaults
def normalize_keys(self, items):
"""Return a config dictionary with normalized keys regardless of
whether the keys were specified in environment variables or in config
files"""
normalized = {}
for key, val in items:
key = key.replace('_', '-')
if not key.startswith('--'):
key = '--%s' % key # only prefer long opts
normalized[key] = val
return normalized
def get_config_section(self, name):
"""Get a section of a configuration"""
if self.config.has_section(name):
return self.config.items(name)
return []
def get_environ_vars(self):
"""Returns a generator with all environmental vars with prefix PIP_"""
for key, val in os.environ.items():
if _environ_prefix_re.search(key):
yield (_environ_prefix_re.sub("", key).lower(), val)
def get_default_values(self):
"""Overridding to make updating the defaults after instantiation of
the option parser possible, update_defaults() does the dirty work."""
if not self.process_default_values:
# Old, pre-Optik 1.5 behaviour.
return optparse.Values(self.defaults)
defaults = self.update_defaults(self.defaults.copy()) # ours
for option in self._get_all_options():
default = defaults.get(option.dest)
if isinstance(default, string_types):
opt_str = option.get_opt_string()
defaults[option.dest] = option.check_value(opt_str, default)
return optparse.Values(defaults)
def error(self, msg):
self.print_usage(sys.stderr)
self.exit(2, "%s\n" % msg)
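# --- Hedged usage sketch (not part of pip itself) ---
# Rough illustration of how a pip command wires these classes together: the
# parser reads pip config files and PIP_* environment variables and folds
# them into the optparse defaults. The 'example' section name and the
# '--timeout' option are placeholders.
if __name__ == '__main__':
    parser = ConfigOptionParser(
        name='example',
        usage='%prog [options]',
        formatter=UpdatingDefaultsHelpFormatter(),
    )
    parser.add_option('--timeout', dest='timeout', type='int', default=15)
    options, _ = parser.parse_args([])
    print(options.timeout)  # may be overridden by PIP_TIMEOUT or a config file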
| bsd-3-clause |
mogoweb/webkit_for_android5.1 | webkit/Tools/QueueStatusServer/handlers/patchstatus.py | 146 | 1974 | # Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from google.appengine.ext import webapp
from model.queuestatus import QueueStatus
class PatchStatus(webapp.RequestHandler):
def get(self, queue_name, attachment_id):
statuses = QueueStatus.all().filter('queue_name =', queue_name).filter('active_patch_id =', int(attachment_id)).order('-date').fetch(1)
if not statuses:
self.error(404)
return
self.response.out.write(statuses[0].message)
| apache-2.0 |
nazo/ansible | lib/ansible/modules/network/aos/aos_rack_type.py | 42 | 7616 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_rack_type
author: Damien Garros (@dgarros)
version_added: "2.3"
short_description: Manage AOS Rack Type
description:
  - Apstra AOS Rack Type module lets you manage your Rack Types easily.
    You can create and delete Rack Types by name, ID or by using a JSON file.
    This module is idempotent and supports the I(check) mode.
    It's using the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Rack Type to manage.
Only one of I(name), I(id) or I(content) can be set.
id:
description:
- AOS Id of the Rack Type to manage (can't be used to create a new Rack Type),
Only one of I(name), I(id) or I(content) can be set.
content:
description:
- Datastructure of the Rack Type to create. The data can be in YAML / JSON or
directly a variable. It's the same datastructure that is returned
on success in I(value).
state:
description:
- Indicate what is the expected state of the Rack Type (present or not).
default: present
choices: ['present', 'absent']
'''
EXAMPLES = '''
- name: "Delete a Rack Type by name"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: absent
- name: "Delete a Rack Type by id"
aos_rack_type:
session: "{{ aos_session }}"
id: "45ab26fc-c2ed-4307-b330-0870488fa13e"
state: absent
# Save a Rack Type to a file
- name: "Access Rack Type 1/3"
aos_rack_type:
session: "{{ aos_session }}"
name: "my-rack-type"
state: present
register: rack_type
- name: "Save Rack Type into a JSON file 2/3"
copy:
content: "{{ rack_type.value | to_nice_json }}"
dest: rack_type_saved.json
- name: "Save Rack Type into a YAML file 3/3"
copy:
content: "{{ rack_type.value | to_nice_yaml }}"
dest: rack_type_saved.yaml
- name: "Load Rack Type from a JSON file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.json') }}"
state: present
- name: "Load Rack Type from a YAML file"
aos_rack_type:
session: "{{ aos_session }}"
content: "{{ lookup('file', 'resources/rack_type_saved.yaml') }}"
state: present
'''
RETURNS = '''
name:
description: Name of the Rack Type
returned: always
type: str
sample: AOS-1x25-1
id:
description: AOS unique ID assigned to the Rack Type
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Value of the object as returned by the AOS Server
returned: always
type: dict
sample: {'...'}
'''
import json
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, find_collection_item, do_load_resource, check_aos_version, content_to_dict
#########################################################
# State Processing
#########################################################
def rack_type_absent(module, aos, my_rack_type):
margs = module.params
    # If the Rack Type does not exist, return directly
if my_rack_type.exists is False:
module.exit_json(changed=False,
name=margs['name'],
id=margs['id'],
value={})
# If not in check mode, delete Rack Type
if not module.check_mode:
try:
my_rack_type.delete()
except:
module.fail_json(msg="An error occured, while trying to delete the Rack Type")
module.exit_json( changed=True,
name=my_rack_type.name,
id=my_rack_type.id,
value={} )
def rack_type_present(module, aos, my_rack_type):
margs = module.params
if margs['content'] is not None:
if 'display_name' in module.params['content'].keys():
do_load_resource(module, aos.RackTypes, module.params['content']['display_name'])
else:
module.fail_json(msg="Unable to find display_name in 'content', Mandatory")
# if rack_type doesn't exist already, create a new one
if my_rack_type.exists is False and 'content' not in margs.keys():
module.fail_json(msg="'content' is mandatory for module that don't exist currently")
module.exit_json( changed=False,
name=my_rack_type.name,
id=my_rack_type.id,
value=my_rack_type.value )
#########################################################
# Main Function
#########################################################
def rack_type(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['content'] is not None:
content = content_to_dict(module, margs['content'] )
if 'display_name' in content.keys():
item_name = content['display_name']
else:
module.fail_json(msg="Unable to extract 'display_name' from 'content'")
elif margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
my_rack_type = find_collection_item(aos.RackTypes,
item_name=item_name,
item_id=item_id)
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
rack_type_absent(module, aos, my_rack_type)
elif margs['state'] == 'present':
rack_type_present(module, aos, my_rack_type)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False ),
id=dict(required=False ),
content=dict(required=False, type="json"),
state=dict( required=False,
choices=['present', 'absent'],
default="present")
),
mutually_exclusive = [('name', 'id', 'content')],
required_one_of=[('name', 'id', 'content')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
rack_type(module)
if __name__ == "__main__":
main()
| gpl-3.0 |
cbrucks/keystone_ldap | keystone/config.py | 1 | 5361 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import sys
import os
from keystone.common import logging
from keystone.openstack.common import cfg
gettext.install('keystone', unicode=1)
class ConfigMixin(object):
def __call__(self, config_files=None, *args, **kw):
if config_files is not None:
self._opts['config_file']['opt'].default = config_files
kw.setdefault('args', [])
return super(ConfigMixin, self).__call__(*args, **kw)
def set_usage(self, usage):
self.usage = usage
self._oparser.usage = usage
class Config(ConfigMixin, cfg.ConfigOpts):
pass
class CommonConfig(ConfigMixin, cfg.CommonConfigOpts):
pass
def setup_logging(conf):
"""
    Sets up the logging options for the service from the supplied configuration.
    :param conf: a cfg.ConfigOpts object
"""
if conf.log_config:
# Use a logging configuration file for all settings...
if os.path.exists(conf.log_config):
logging.config.fileConfig(conf.log_config)
return
else:
raise RuntimeError('Unable to locate specified logging '
'config file: %s' % conf.log_config)
root_logger = logging.root
if conf.debug:
root_logger.setLevel(logging.DEBUG)
elif conf.verbose:
root_logger.setLevel(logging.INFO)
else:
root_logger.setLevel(logging.WARNING)
formatter = logging.Formatter(conf.log_format, conf.log_date_format)
if conf.use_syslog:
try:
facility = getattr(logging.SysLogHandler,
conf.syslog_log_facility)
except AttributeError:
raise ValueError(_('Invalid syslog facility'))
handler = logging.SysLogHandler(address='/dev/log',
facility=facility)
elif conf.log_file:
logfile = conf.log_file
if conf.log_dir:
logfile = os.path.join(conf.log_dir, logfile)
handler = logging.WatchedFileHandler(logfile)
else:
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
def register_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_opt(cfg.StrOpt(*args, **kw), group=group)
def register_cli_str(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_cli_opt(cfg.StrOpt(*args, **kw), group=group)
def register_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_cli_bool(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_cli_opt(cfg.BoolOpt(*args, **kw), group=group)
def register_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_opt(cfg.IntOpt(*args, **kw), group=group)
def register_cli_int(*args, **kw):
conf = kw.pop('conf', CONF)
group = _ensure_group(kw, conf)
return conf.register_cli_opt(cfg.IntOpt(*args, **kw), group=group)
def _ensure_group(kw, conf):
group = kw.pop('group', None)
if group:
conf.register_group(cfg.OptGroup(name=group))
return group
CONF = CommonConfig(project='keystone')
register_str('admin_token', default='ADMIN')
register_str('bind_host', default='0.0.0.0')
register_str('compute_port')
register_str('admin_port')
register_str('public_port')
register_int('max_param_size', default=64)
# sql options
register_str('connection', group='sql')
register_int('idle_timeout', group='sql')
register_str('driver', group='catalog')
register_str('driver', group='identity')
register_str('driver', group='policy')
register_str('driver', group='token')
register_str('driver', group='ec2')
#ldap
register_str('url', group='ldap')
register_str('user', group='ldap')
register_str('password', group='ldap')
register_str('suffix', group='ldap')
register_bool('use_dumb_member', group='ldap')
register_str('user_tree_dn', group='ldap')
register_str('user_objectclass', group='ldap')
register_str('user_id_attribute', group='ldap')
register_str('user_name_attribute', group='ldap', default='sn')
register_str('tenant_tree_dn', group='ldap')
register_str('tenant_objectclass', group='ldap')
register_str('tenant_id_attribute', group='ldap')
register_str('tenant_member_attribute', group='ldap')
register_str('tenant_name_attribute', group='ldap', default='ou')
register_str('role_tree_dn', group='ldap')
register_str('role_objectclass', group='ldap')
register_str('role_id_attribute', group='ldap')
register_str('role_member_attribute', group='ldap')
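# --- Hedged usage sketch (not part of the upstream module) ---
# After the registrations above, a service entry point would typically parse
# its config file(s) and then read values through CONF. The path below is a
# placeholder; pass a file that actually exists.
if __name__ == '__main__':
    CONF(config_files=['/etc/keystone/keystone.conf'])
    setup_logging(CONF)
    print(CONF.bind_host)                     # e.g. '0.0.0.0'
    print(CONF.ldap.user_name_attribute)      # grouped options: CONF.<group>.<opt>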
| apache-2.0 |
pasiegel/SickGear | lib/hachoir_core/field/float.py | 90 | 3547 | from lib.hachoir_core.field import Bit, Bits, FieldSet
from lib.hachoir_core.endian import BIG_ENDIAN, LITTLE_ENDIAN
import struct
# Make sure that we use right struct types
assert struct.calcsize("f") == 4
assert struct.calcsize("d") == 8
assert struct.unpack("<d", "\x1f\x85\xebQ\xb8\x1e\t@")[0] == 3.14
assert struct.unpack(">d", "\xc0\0\0\0\0\0\0\0")[0] == -2.0
class FloatMantissa(Bits):
def createValue(self):
value = Bits.createValue(self)
return 1 + float(value) / (2 ** self.size)
def createRawDisplay(self):
return unicode(Bits.createValue(self))
class FloatExponent(Bits):
def __init__(self, parent, name, size):
Bits.__init__(self, parent, name, size)
self.bias = 2 ** (size-1) - 1
def createValue(self):
return Bits.createValue(self) - self.bias
def createRawDisplay(self):
return unicode(self.value + self.bias)
def floatFactory(name, format, mantissa_bits, exponent_bits, doc):
size = 1 + mantissa_bits + exponent_bits
class Float(FieldSet):
static_size = size
__doc__ = doc
def __init__(self, parent, name, description=None):
assert parent.endian in (BIG_ENDIAN, LITTLE_ENDIAN)
FieldSet.__init__(self, parent, name, description, size)
if format:
if self._parent.endian == BIG_ENDIAN:
self.struct_format = ">"+format
else:
self.struct_format = "<"+format
else:
self.struct_format = None
def createValue(self):
"""
Create float value: use struct.unpack() when it's possible
(32 and 64-bit float) or compute it with :
mantissa * (2.0 ** exponent)
This computation may raise an OverflowError.
"""
if self.struct_format:
raw = self._parent.stream.readBytes(
self.absolute_address, self._size//8)
try:
return struct.unpack(self.struct_format, raw)[0]
except struct.error, err:
raise ValueError("[%s] conversion error: %s" %
(self.__class__.__name__, err))
else:
try:
value = self["mantissa"].value * (2.0 ** float(self["exponent"].value))
if self["negative"].value:
return -(value)
else:
return value
except OverflowError:
raise ValueError("[%s] floating point overflow" %
self.__class__.__name__)
def createFields(self):
yield Bit(self, "negative")
yield FloatExponent(self, "exponent", exponent_bits)
if 64 <= mantissa_bits:
yield Bit(self, "one")
yield FloatMantissa(self, "mantissa", mantissa_bits-1)
else:
yield FloatMantissa(self, "mantissa", mantissa_bits)
cls = Float
cls.__name__ = name
return cls
# 32-bit float (standard: IEEE 754/854)
Float32 = floatFactory("Float32", "f", 23, 8,
"Floating point number: format IEEE 754 int 32 bit")
# 64-bit float (standard: IEEE 754/854)
Float64 = floatFactory("Float64", "d", 52, 11,
"Floating point number: format IEEE 754 in 64 bit")
# 80-bit float (standard: IEEE 754/854)
Float80 = floatFactory("Float80", None, 64, 15,
"Floating point number: format IEEE 754 in 80 bit")
| gpl-3.0 |
40223236/w16b_test | static/Brython3.1.1-20150328-091302/Lib/xml/sax/__init__.py | 637 | 3505 | """Simple API for XML (SAX) implementation for Python.
This module provides an implementation of the SAX 2 interface;
information about the Java version of the interface can be found at
http://www.megginson.com/SAX/. The Python version of the interface is
documented at <...>.
This package contains the following modules:
handler -- Base classes and constants which define the SAX 2 API for
the 'client-side' of SAX for Python.
saxutils -- Implementation of the convenience classes commonly used to
work with SAX.
xmlreader -- Base classes and constants which define the SAX 2 API for
the parsers used with SAX for Python.
expatreader -- Driver that allows use of the Expat parser with SAX.
"""
from .xmlreader import InputSource
from .handler import ContentHandler, ErrorHandler
from ._exceptions import SAXException, SAXNotRecognizedException, \
SAXParseException, SAXNotSupportedException, \
SAXReaderNotAvailable
def parse(source, handler, errorHandler=ErrorHandler()):
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
parser.parse(source)
def parseString(string, handler, errorHandler=ErrorHandler()):
from io import BytesIO
if errorHandler is None:
errorHandler = ErrorHandler()
parser = make_parser()
parser.setContentHandler(handler)
parser.setErrorHandler(errorHandler)
inpsrc = InputSource()
inpsrc.setByteStream(BytesIO(string))
parser.parse(inpsrc)
# this is the parser list used by the make_parser function if no
# alternatives are given as parameters to the function
default_parser_list = ["xml.sax.expatreader"]
# tell modulefinder that importing sax potentially imports expatreader
_false = 0
if _false:
import xml.sax.expatreader
import os, sys
#if "PY_SAX_PARSER" in os.environ:
# default_parser_list = os.environ["PY_SAX_PARSER"].split(",")
del os
_key = "python.xml.sax.parser"
if sys.platform[:4] == "java" and sys.registry.containsKey(_key):
default_parser_list = sys.registry.getProperty(_key).split(",")
def make_parser(parser_list = []):
"""Creates and returns a SAX parser.
Creates the first parser it is able to instantiate of the ones
given in the list created by doing parser_list +
default_parser_list. The lists must contain the names of Python
modules containing both a SAX parser and a create_parser function."""
for parser_name in parser_list + default_parser_list:
try:
return _create_parser(parser_name)
except ImportError as e:
import sys
if parser_name in sys.modules:
# The parser module was found, but importing it
# failed unexpectedly, pass this exception through
raise
except SAXReaderNotAvailable:
# The parser module detected that it won't work properly,
# so try the next one
pass
raise SAXReaderNotAvailable("No parsers found", None)
# --- Internal utility methods used by make_parser
if sys.platform[ : 4] == "java":
def _create_parser(parser_name):
from org.python.core import imp
drv_module = imp.importName(parser_name, 0, globals())
return drv_module.create_parser()
else:
def _create_parser(parser_name):
drv_module = __import__(parser_name,{},{},['create_parser'])
return drv_module.create_parser()
del sys
| agpl-3.0 |
brandon-rhodes/numpy | numpy/core/records.py | 10 | 28847 | """
Record Arrays
=============
Record arrays expose the fields of structured arrays as properties.
Most commonly, ndarrays contain elements of a single type, e.g. floats,
integers, bools etc. However, it is possible for elements to be combinations
of these using structured types, such as::
>>> a = np.array([(1, 2.0), (1, 2.0)], dtype=[('x', int), ('y', float)])
>>> a
array([(1, 2.0), (1, 2.0)],
dtype=[('x', '<i4'), ('y', '<f8')])
Here, each element consists of two fields: x (an int), and y (a float).
This is known as a structured array. The different fields are analogous
to columns in a spread-sheet. The different fields can be accessed as
one would a dictionary::
>>> a['x']
array([1, 1])
>>> a['y']
array([ 2., 2.])
Record arrays allow us to access fields as properties::
>>> ar = np.rec.array(a)
>>> ar.x
array([1, 1])
>>> ar.y
array([ 2., 2.])
"""
from __future__ import division, absolute_import, print_function
import sys
import os
from . import numeric as sb
from . import numerictypes as nt
from numpy.compat import isfileobj, bytes, long
# All of the functions allow formats to be a dtype
__all__ = ['record', 'recarray', 'format_parser']
ndarray = sb.ndarray
_byteorderconv = {'b':'>',
'l':'<',
'n':'=',
'B':'>',
'L':'<',
'N':'=',
'S':'s',
's':'s',
'>':'>',
'<':'<',
'=':'=',
'|':'|',
'I':'|',
'i':'|'}
# formats regular expression
# allows multidimension spec with a tuple syntax in front
# of the letter code '(2,3)f4' and ' ( 2 , 3 ) f4 '
# are equally allowed
numfmt = nt.typeDict
def find_duplicate(list):
"""Find duplication in a list, return a list of duplicated elements"""
dup = []
for i in range(len(list)):
if (list[i] in list[i + 1:]):
if (list[i] not in dup):
dup.append(list[i])
return dup
class format_parser:
"""
Class to convert formats, names, titles description to a dtype.
After constructing the format_parser object, the dtype attribute is
the converted data-type:
``dtype = format_parser(formats, names, titles).dtype``
Attributes
----------
dtype : dtype
The converted data-type.
Parameters
----------
formats : str or list of str
The format description, either specified as a string with
comma-separated format descriptions in the form ``'f8, i4, a5'``, or
a list of format description strings in the form
``['f8', 'i4', 'a5']``.
names : str or list/tuple of str
The field names, either specified as a comma-separated string in the
form ``'col1, col2, col3'``, or as a list or tuple of strings in the
form ``['col1', 'col2', 'col3']``.
An empty list can be used, in that case default field names
('f0', 'f1', ...) are used.
titles : sequence
Sequence of title strings. An empty list can be used to leave titles
out.
aligned : bool, optional
If True, align the fields by padding as the C-compiler would.
Default is False.
byteorder : str, optional
If specified, all the fields will be changed to the
provided byte-order. Otherwise, the default byte-order is
used. For all available string specifiers, see `dtype.newbyteorder`.
See Also
--------
dtype, typename, sctype2char
Examples
--------
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... ['T1', 'T2', 'T3']).dtype
dtype([(('T1', 'col1'), '<f8'), (('T2', 'col2'), '<i4'),
(('T3', 'col3'), '|S5')])
`names` and/or `titles` can be empty lists. If `titles` is an empty list,
titles will simply not appear. If `names` is empty, default field names
will be used.
>>> np.format_parser(['f8', 'i4', 'a5'], ['col1', 'col2', 'col3'],
... []).dtype
dtype([('col1', '<f8'), ('col2', '<i4'), ('col3', '|S5')])
>>> np.format_parser(['f8', 'i4', 'a5'], [], []).dtype
dtype([('f0', '<f8'), ('f1', '<i4'), ('f2', '|S5')])
"""
def __init__(self, formats, names, titles, aligned=False, byteorder=None):
self._parseFormats(formats, aligned)
self._setfieldnames(names, titles)
self._createdescr(byteorder)
self.dtype = self._descr
def _parseFormats(self, formats, aligned=0):
""" Parse the field formats """
if formats is None:
raise ValueError("Need formats argument")
if isinstance(formats, list):
if len(formats) < 2:
formats.append('')
formats = ','.join(formats)
dtype = sb.dtype(formats, aligned)
fields = dtype.fields
if fields is None:
dtype = sb.dtype([('f1', dtype)], aligned)
fields = dtype.fields
keys = dtype.names
self._f_formats = [fields[key][0] for key in keys]
self._offsets = [fields[key][1] for key in keys]
self._nfields = len(keys)
def _setfieldnames(self, names, titles):
"""convert input field names into a list and assign to the _names
attribute """
if (names):
if (type(names) in [list, tuple]):
pass
elif isinstance(names, str):
names = names.split(',')
else:
raise NameError("illegal input names %s" % repr(names))
self._names = [n.strip() for n in names[:self._nfields]]
else:
self._names = []
# if the names are not specified, they will be assigned as
# "f0, f1, f2,..."
# if not enough names are specified, they will be assigned as "f[n],
# f[n+1],..." etc. where n is the number of specified names..."
self._names += ['f%d' % i for i in range(len(self._names),
self._nfields)]
# check for redundant names
_dup = find_duplicate(self._names)
if _dup:
raise ValueError("Duplicate field names: %s" % _dup)
if (titles):
self._titles = [n.strip() for n in titles[:self._nfields]]
else:
self._titles = []
titles = []
if (self._nfields > len(titles)):
self._titles += [None] * (self._nfields - len(titles))
def _createdescr(self, byteorder):
descr = sb.dtype({'names':self._names,
'formats':self._f_formats,
'offsets':self._offsets,
'titles':self._titles})
if (byteorder is not None):
byteorder = _byteorderconv[byteorder[0]]
descr = descr.newbyteorder(byteorder)
self._descr = descr
class record(nt.void):
"""A data-type scalar that allows field access as attribute lookup.
"""
# manually set name and module so that this class's type shows up
# as numpy.record when printed
__name__ = 'record'
__module__ = 'numpy'
def __repr__(self):
return self.__str__()
def __str__(self):
return str(self.item())
def __getattribute__(self, attr):
if attr in ['setfield', 'getfield', 'dtype']:
return nt.void.__getattribute__(self, attr)
try:
return nt.void.__getattribute__(self, attr)
except AttributeError:
pass
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
obj = self.getfield(*res[:2])
# if it has fields return a record,
# otherwise return the object
try:
dt = obj.dtype
except AttributeError:
#happens if field is Object type
return obj
if dt.fields:
return obj.view((record, obj.dtype.descr))
return obj
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def __setattr__(self, attr, val):
if attr in ['setfield', 'getfield', 'dtype']:
raise AttributeError("Cannot set '%s' attribute" % attr)
fielddict = nt.void.__getattribute__(self, 'dtype').fields
res = fielddict.get(attr, None)
if res:
return self.setfield(val, *res[:2])
else:
if getattr(self, attr, None):
return nt.void.__setattr__(self, attr, val)
else:
raise AttributeError("'record' object has no "
"attribute '%s'" % attr)
def pprint(self):
"""Pretty-print all fields."""
# pretty-print all fields
names = self.dtype.names
maxlen = max(len(name) for name in names)
rows = []
fmt = '%% %ds: %%s' % maxlen
for name in names:
rows.append(fmt % (name, getattr(self, name)))
return "\n".join(rows)
# The recarray is almost identical to a standard array (which supports
# named fields already) The biggest difference is that it can use
# attribute-lookup to find the fields and it is constructed using
# a record.
# If byteorder is given it forces a particular byteorder on all
# the fields (and any subfields)
class recarray(ndarray):
"""
Construct an ndarray that allows field access using attributes.
Arrays may have a data-types containing fields, analogous
to columns in a spread sheet. An example is ``[(x, int), (y, float)]``,
where each entry in the array is a pair of ``(int, float)``. Normally,
these attributes are accessed using dictionary lookups such as ``arr['x']``
and ``arr['y']``. Record arrays allow the fields to be accessed as members
of the array, using ``arr.x`` and ``arr.y``.
Parameters
----------
shape : tuple
Shape of output array.
dtype : data-type, optional
The desired data-type. By default, the data-type is determined
from `formats`, `names`, `titles`, `aligned` and `byteorder`.
formats : list of data-types, optional
A list containing the data-types for the different columns, e.g.
``['i4', 'f8', 'i4']``. `formats` does *not* support the new
convention of using types directly, i.e. ``(int, float, int)``.
Note that `formats` must be a list, not a tuple.
Given that `formats` is somewhat limited, we recommend specifying
`dtype` instead.
names : tuple of str, optional
The name of each column, e.g. ``('x', 'y', 'z')``.
buf : buffer, optional
By default, a new array is created of the given shape and data-type.
If `buf` is specified and is an object exposing the buffer interface,
the array will use the memory from the existing buffer. In this case,
the `offset` and `strides` keywords are available.
Other Parameters
----------------
titles : tuple of str, optional
Aliases for column names. For example, if `names` were
``('x', 'y', 'z')`` and `titles` is
``('x_coordinate', 'y_coordinate', 'z_coordinate')``, then
``arr['x']`` is equivalent to both ``arr.x`` and ``arr.x_coordinate``.
byteorder : {'<', '>', '='}, optional
Byte-order for all fields.
aligned : bool, optional
Align the fields in memory as the C-compiler would.
strides : tuple of ints, optional
Buffer (`buf`) is interpreted according to these strides (strides
define how many bytes each array element, row, column, etc.
occupy in memory).
offset : int, optional
Start reading buffer (`buf`) from this offset onwards.
order : {'C', 'F'}, optional
Row-major or column-major order.
Returns
-------
rec : recarray
Empty array of the given shape and type.
See Also
--------
rec.fromrecords : Construct a record array from data.
record : fundamental data-type for `recarray`.
format_parser : determine a data-type from formats, names, titles.
Notes
-----
This constructor can be compared to ``empty``: it creates a new record
array but does not fill it with data. To create a record array from data,
use one of the following methods:
1. Create a standard ndarray and convert it to a record array,
using ``arr.view(np.recarray)``
2. Use the `buf` keyword.
3. Use `np.rec.fromrecords`.
Examples
--------
Create an array with two fields, ``x`` and ``y``:
>>> x = np.array([(1.0, 2), (3.0, 4)], dtype=[('x', float), ('y', int)])
>>> x
array([(1.0, 2), (3.0, 4)],
dtype=[('x', '<f8'), ('y', '<i4')])
>>> x['x']
array([ 1., 3.])
View the array as a record array:
>>> x = x.view(np.recarray)
>>> x.x
array([ 1., 3.])
>>> x.y
array([2, 4])
Create a new, empty record array:
>>> np.recarray((2,),
... dtype=[('x', int), ('y', float), ('z', int)]) #doctest: +SKIP
rec.array([(-1073741821, 1.2249118382103472e-301, 24547520),
(3471280, 1.2134086255804012e-316, 0)],
dtype=[('x', '<i4'), ('y', '<f8'), ('z', '<i4')])
"""
# manually set name and module so that this class's type shows
# up as "numpy.recarray" when printed
__name__ = 'recarray'
__module__ = 'numpy'
def __new__(subtype, shape, dtype=None, buf=None, offset=0, strides=None,
formats=None, names=None, titles=None,
byteorder=None, aligned=False, order='C'):
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
if buf is None:
self = ndarray.__new__(subtype, shape, (record, descr), order=order)
else:
self = ndarray.__new__(subtype, shape, (record, descr),
buffer=buf, offset=offset,
strides=strides, order=order)
return self
def __getattribute__(self, attr):
# See if ndarray has this attr, and return it if so. (note that this
# means a field with the same name as an ndarray attr cannot be
# accessed by attribute).
try:
return object.__getattribute__(self, attr)
except AttributeError: # attr must be a fieldname
pass
# look for a field with this name
fielddict = ndarray.__getattribute__(self, 'dtype').fields
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("recarray has no attribute %s" % attr)
obj = self.getfield(*res)
# At this point obj will always be a recarray, since (see
# PyArray_GetField) the type of obj is inherited. Next, if obj.dtype is
# non-structured, convert it to an ndarray. If obj is structured leave
# it as a recarray, but make sure to convert to the same dtype.type (eg
# to preserve numpy.record type if present), since nested structured
# fields do not inherit type.
if obj.dtype.fields:
return obj.view(dtype=(self.dtype.type, obj.dtype.descr))
else:
return obj.view(ndarray)
# Save the dictionary.
# If the attr is a field name and not in the saved dictionary
# Undo any "setting" of the attribute and do a setfield
# Thus, you can't create attributes on-the-fly that are field names.
def __setattr__(self, attr, val):
newattr = attr not in self.__dict__
try:
ret = object.__setattr__(self, attr, val)
except:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
exctype, value = sys.exc_info()[:2]
raise exctype(value)
else:
fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
if attr not in fielddict:
return ret
if newattr: # We just added this one
try: # or this setattr worked on an internal
# attribute.
object.__delattr__(self, attr)
except:
return ret
try:
res = fielddict[attr][:2]
except (TypeError, KeyError):
raise AttributeError("record array has no attribute %s" % attr)
return self.setfield(val, *res)
def __getitem__(self, indx):
obj = ndarray.__getitem__(self, indx)
# copy behavior of getattr, except that here
# we might also be returning a single element
if isinstance(obj, ndarray):
if obj.dtype.fields:
return obj.view(dtype=(self.dtype.type, obj.dtype.descr))
else:
return obj.view(type=ndarray)
else:
# return a single element
return obj
def __repr__(self):
# get data/shape string. logic taken from numeric.array_repr
if self.size > 0 or self.shape==(0,):
lst = sb.array2string(self, separator=', ')
else:
# show zero-length shape unless it is (0,)
lst = "[], shape=%s" % (repr(self.shape),)
if self.dtype.type is record:
# If this is a full record array (has numpy.record dtype),
# represent it using the rec.array function. Since rec.array
# converts dtype to a numpy.record for us, use only dtype.descr,
# not repr(dtype).
lf = '\n'+' '*len("rec.array(")
return ('rec.array(%s, %sdtype=%s)' %
(lst, lf, repr(self.dtype.descr)))
else:
# otherwise represent it using np.array plus a view
# (There is currently (v1.10) no other easy way to create it)
lf = '\n'+' '*len("array(")
return ('array(%s, %sdtype=%s).view(numpy.recarray)' %
(lst, lf, str(self.dtype)))
def field(self, attr, val=None):
if isinstance(attr, int):
names = ndarray.__getattribute__(self, 'dtype').names
attr = names[attr]
fielddict = ndarray.__getattribute__(self, 'dtype').fields
res = fielddict[attr][:2]
if val is None:
obj = self.getfield(*res)
if obj.dtype.fields:
return obj
return obj.view(ndarray)
else:
return self.setfield(val, *res)
def view(self, dtype=None, type=None):
if dtype is None:
return ndarray.view(self, type)
elif type is None:
try:
if issubclass(dtype, ndarray):
return ndarray.view(self, dtype)
except TypeError:
pass
dtype = sb.dtype(dtype)
if dtype.fields is None:
return self.__array__().view(dtype)
return ndarray.view(self, dtype)
else:
return ndarray.view(self, dtype, type)
def fromarrays(arrayList, dtype=None, shape=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a record array from a (flat) list of arrays
>>> x1=np.array([1,2,3,4])
>>> x2=np.array(['a','dd','xyz','12'])
>>> x3=np.array([1.1,2,3,4])
>>> r = np.core.records.fromarrays([x1,x2,x3],names='a,b,c')
>>> print r[1]
(2, 'dd', 2.0)
>>> x1[1]=34
>>> r.a
array([1, 2, 3, 4])
"""
arrayList = [sb.asarray(x) for x in arrayList]
if shape is None or shape == 0:
shape = arrayList[0].shape
if isinstance(shape, int):
shape = (shape,)
if formats is None and dtype is None:
# go through each object in the list to see if it is an ndarray
# and determine the formats.
formats = []
for obj in arrayList:
if not isinstance(obj, ndarray):
raise ValueError("item in the array list must be an ndarray.")
formats.append(obj.dtype.str)
formats = ','.join(formats)
if dtype is not None:
descr = sb.dtype(dtype)
_names = descr.names
else:
parsed = format_parser(formats, names, titles, aligned, byteorder)
_names = parsed._names
descr = parsed._descr
# Determine shape from data-type.
if len(descr) != len(arrayList):
raise ValueError("mismatch between the number of fields "
"and the number of arrays")
d0 = descr[0].shape
nn = len(d0)
if nn > 0:
shape = shape[:-nn]
for k, obj in enumerate(arrayList):
nn = len(descr[k].shape)
testshape = obj.shape[:len(obj.shape) - nn]
if testshape != shape:
raise ValueError("array-shape mismatch in array %d" % k)
_array = recarray(shape, descr)
# populate the record array (makes a copy)
for i in range(len(arrayList)):
_array[_names[i]] = arrayList[i]
return _array
# shape must be 1-d if you use list of lists...
def fromrecords(recList, dtype=None, shape=None, formats=None, names=None,
titles=None, aligned=False, byteorder=None):
""" create a recarray from a list of records in text form
The data in the same field can be heterogeneous; they will be promoted
to the highest data type. This method is intended for creating
smaller record arrays. If used to create a large array without formats
defined, e.g.
r=fromrecords([(2,3.,'abc')]*100000)
it can be slow.
If formats is None, then this will auto-detect formats. Use list of
tuples rather than list of lists for faster processing.
>>> r=np.core.records.fromrecords([(456,'dbe',1.2),(2,'de',1.3)],
... names='col1,col2,col3')
>>> print r[0]
(456, 'dbe', 1.2)
>>> r.col1
array([456, 2])
>>> r.col2
array(['dbe', 'de'],
dtype='|S3')
>>> import pickle
>>> print pickle.loads(pickle.dumps(r))
[(456, 'dbe', 1.2) (2, 'de', 1.3)]
"""
nfields = len(recList[0])
if formats is None and dtype is None: # slower
obj = sb.array(recList, dtype=object)
arrlist = [sb.array(obj[..., i].tolist()) for i in range(nfields)]
return fromarrays(arrlist, formats=formats, shape=shape, names=names,
titles=titles, aligned=aligned, byteorder=byteorder)
if dtype is not None:
descr = sb.dtype((record, dtype))
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
try:
retval = sb.array(recList, dtype=descr)
except TypeError: # list of lists instead of list of tuples
if (shape is None or shape == 0):
shape = len(recList)
if isinstance(shape, (int, long)):
shape = (shape,)
if len(shape) > 1:
raise ValueError("Can only deal with 1-d array.")
_array = recarray(shape, descr)
for k in range(_array.size):
_array[k] = tuple(recList[k])
return _array
else:
if shape is not None and retval.shape != shape:
retval.shape = shape
res = retval.view(recarray)
return res
def fromstring(datastring, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
""" create a (read-only) record array from binary data contained in
a string"""
if dtype is None and formats is None:
raise ValueError("Must have dtype= or formats=")
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
if (shape is None or shape == 0 or shape == -1):
shape = (len(datastring) - offset) / itemsize
_array = recarray(shape, descr, buf=datastring, offset=offset)
return _array
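# Illustrative usage of fromstring (comment added for clarity; the field layout
# below is an assumption, not part of the original module):
#   >>> import struct
#   >>> buf = struct.pack('<if', 7, 2.5) * 3   # three packed (int32, float32) records
#   >>> r = fromstring(buf, formats='i4,f4', shape=3, byteorder='<')
#   >>> r.f0    # default field names are f0, f1, ...; gives array([7, 7, 7])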
def get_remaining_size(fd):
try:
fn = fd.fileno()
except AttributeError:
return os.path.getsize(fd.name) - fd.tell()
st = os.fstat(fn)
size = st.st_size - fd.tell()
return size
def fromfile(fd, dtype=None, shape=None, offset=0, formats=None,
names=None, titles=None, aligned=False, byteorder=None):
"""Create an array from binary file data
If file is a string then that file is opened, else it is assumed
to be a file object.
>>> from tempfile import TemporaryFile
>>> a = np.empty(10,dtype='f8,i4,a5')
>>> a[5] = (0.5,10,'abcde')
>>>
>>> fd=TemporaryFile()
>>> a = a.newbyteorder('<')
>>> a.tofile(fd)
>>>
>>> fd.seek(0)
>>> r=np.core.records.fromfile(fd, formats='f8,i4,a5', shape=10,
... byteorder='<')
>>> print r[5]
(0.5, 10, 'abcde')
>>> r.shape
(10,)
"""
if (shape is None or shape == 0):
shape = (-1,)
elif isinstance(shape, (int, long)):
shape = (shape,)
name = 0
if isinstance(fd, str):
name = 1
fd = open(fd, 'rb')
if (offset > 0):
fd.seek(offset, 1)
size = get_remaining_size(fd)
if dtype is not None:
descr = sb.dtype(dtype)
else:
descr = format_parser(formats, names, titles, aligned, byteorder)._descr
itemsize = descr.itemsize
shapeprod = sb.array(shape).prod()
shapesize = shapeprod * itemsize
if shapesize < 0:
shape = list(shape)
shape[ shape.index(-1) ] = size / -shapesize
shape = tuple(shape)
shapeprod = sb.array(shape).prod()
nbytes = shapeprod * itemsize
if nbytes > size:
raise ValueError(
"Not enough bytes left in file for specified shape and type")
# create the array
_array = recarray(shape, descr)
nbytesread = fd.readinto(_array.data)
if nbytesread != nbytes:
raise IOError("Didn't read as many bytes as expected")
if name:
fd.close()
return _array
def array(obj, dtype=None, shape=None, offset=0, strides=None, formats=None,
names=None, titles=None, aligned=False, byteorder=None, copy=True):
"""Construct a record array from a wide-variety of objects.
"""
if (isinstance(obj, (type(None), str)) or isfileobj(obj)) \
and (formats is None) \
and (dtype is None):
raise ValueError("Must define formats (or dtype) if object is "\
"None, string, or an open file")
kwds = {}
if dtype is not None:
dtype = sb.dtype(dtype)
elif formats is not None:
dtype = format_parser(formats, names, titles,
aligned, byteorder)._descr
else:
kwds = {'formats': formats,
'names' : names,
'titles' : titles,
'aligned' : aligned,
'byteorder' : byteorder
}
if obj is None:
if shape is None:
raise ValueError("Must define a shape if obj is None")
return recarray(shape, dtype, buf=obj, offset=offset, strides=strides)
elif isinstance(obj, bytes):
return fromstring(obj, dtype, shape=shape, offset=offset, **kwds)
elif isinstance(obj, (list, tuple)):
if isinstance(obj[0], (tuple, list)):
return fromrecords(obj, dtype=dtype, shape=shape, **kwds)
else:
return fromarrays(obj, dtype=dtype, shape=shape, **kwds)
elif isinstance(obj, recarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
return new
elif isfileobj(obj):
return fromfile(obj, dtype=dtype, shape=shape, offset=offset)
elif isinstance(obj, ndarray):
if dtype is not None and (obj.dtype != dtype):
new = obj.view(dtype)
else:
new = obj
if copy:
new = new.copy()
res = new.view(recarray)
if issubclass(res.dtype.type, nt.void):
res.dtype = sb.dtype((record, res.dtype))
return res
else:
interface = getattr(obj, "__array_interface__", None)
if interface is None or not isinstance(interface, dict):
raise ValueError("Unknown input type")
obj = sb.array(obj)
if dtype is not None and (obj.dtype != dtype):
obj = obj.view(dtype)
res = obj.view(recarray)
if issubclass(res.dtype.type, nt.void):
res.dtype = sb.dtype((record, res.dtype))
return res
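# Illustrative dispatch summary for array() (comment only, not part of the
# original source; ``np`` and 'data.bin' below are assumed placeholders):
#   array([(1, 2.0), (3, 4.0)], formats='i4,f8', names='a,b')  # list of tuples -> fromrecords
#   array(None, formats='i4,f8', shape=5)                      # None + shape   -> empty recarray
#   array(np.zeros(3, dtype='i4,f8'))                          # ndarray        -> view as recarray
#   array(open('data.bin', 'rb'), formats='i4,f8')             # open file      -> fromfile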
| bsd-3-clause |
QualiSystems/shellfoundry | tests/test_utilities/test_datamodel_merger.py | 1 | 12597 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import codecs
import os
import unittest
import xml.etree.ElementTree as etree
from shellfoundry.utilities.shell_datamodel_merger import ShellDataModelMerger
from tests import TEST_DIR
class TestDataModelMerger(unittest.TestCase):
def test_works_with_utf_files(self):
with codecs.open(
os.path.join(TEST_DIR, "test_utilities", "test_data", "datamodel.xml"),
"r",
encoding="utf8",
) as f:
dm = f.read()
with codecs.open(
os.path.join(TEST_DIR, "test_utilities", "test_data", "shell_model.xml"),
"r",
encoding="utf8",
) as f:
shell = f.read()
merger = ShellDataModelMerger()
merged_xml = merger.merge_shell_model(dm, shell)
self.assertIsNotNone(merged_xml)
def test_merges_attributes(self):
datamodel = """<?xml version="1.0" encoding="utf-8"?>
<DataModelInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd"> # noqa: E501
<Attributes>
</Attributes>
<ResourceFamilies>
<ResourceFamily Name="Switch" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
</ResourceFamilies>
<DriverDescriptors />
<ScriptDescriptors />
</DataModelInfo>
"""
shell = """
<Shell>
<ShellAttributes>
<AttributeInfo Name="Shell Enable Password" Type="Password" DefaultValue="3M3u7nkDzxWb0aJ/IZYeWw==" IsReadOnly="false"> # noqa: E501
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
<AttributeInfo Name="Shell Power Management" Type="Boolean" DefaultValue="False" IsReadOnly="false">
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
</ShellAttributes>
<ShellModel Family="Switch">
<ResourceModel Name="SSwitch" Description="" SupportsConcurrentCommands="true">
<AttachedAttributes>
</AttachedAttributes>
<AttributeValues>
</AttributeValues>
<Drivers>
<DriverName>SSwitchDriver</DriverName>
</Drivers>
<Scripts />
</ResourceModel>
</ShellModel>
</Shell>
"""
merger = ShellDataModelMerger()
merged_xml = merger.merge_shell_model(datamodel, shell)
parser = etree.XMLParser(encoding="utf-8")
tree = etree.fromstring(merged_xml, parser)
self.assertIsNotNone(
tree.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}AttributeInfo[@Name='Shell Enable Password']" # noqa: E501
),
"Attribute was not found in merged xml",
)
self.assertIsNotNone(
tree.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}AttributeInfo[@Name='Shell Power Management']" # noqa: E501
),
"Attribute was not found in merged xml",
)
def test_exception_thrown_when_family_missing_in_data_model(self):
datamodel = """<?xml version="1.0" encoding="utf-8"?>
<DataModelInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd"> # noqa: E501
<Attributes>
</Attributes>
<ResourceFamilies>
<ResourceFamily Name="Switch" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
</ResourceFamilies>
<DriverDescriptors />
<ScriptDescriptors />
</DataModelInfo>
"""
shell = """
<Shell>
<ShellAttributes>
<AttributeInfo Name="Shell Enable Password" Type="Password" DefaultValue="3M3u7nkDzxWb0aJ/IZYeWw==" IsReadOnly="false"> # noqa: E501
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
<AttributeInfo Name="Shell Power Management" Type="Boolean" DefaultValue="False" IsReadOnly="false">
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
</ShellAttributes>
<ShellModel Family="NON EXISTING FAMILY">
<ResourceModel Name="SSwitch" Description="" SupportsConcurrentCommands="true">
<AttachedAttributes>
</AttachedAttributes>
<AttributeValues>
</AttributeValues>
<Drivers>
<DriverName>SSwitchDriver</DriverName>
</Drivers>
<Scripts />
</ResourceModel>
</ShellModel>
</Shell>
"""
merger = ShellDataModelMerger()
# Assert
self.assertRaises(Exception, merger.merge_shell_model, datamodel, shell)
def test_exception_thrown_when_shell_model_missing_in_data_model(self):
datamodel = """<?xml version="1.0" encoding="utf-8"?>
<DataModelInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd"> # noqa: E501
<Attributes>
</Attributes>
<ResourceFamilies>
<ResourceFamily Name="Switch" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
</ResourceFamilies>
<DriverDescriptors />
<ScriptDescriptors />
</DataModelInfo>
"""
shell = """
<Shell>
<ShellAttributes>
<AttributeInfo Name="Shell Enable Password" Type="Password" DefaultValue="3M3u7nkDzxWb0aJ/IZYeWw==" IsReadOnly="false"> # noqa: E501
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
<AttributeInfo Name="Shell Power Management" Type="Boolean" DefaultValue="False" IsReadOnly="false">
<Rules>
<Rule Name="Configuration" />
</Rules>
</AttributeInfo>
</ShellAttributes>
</Shell>
"""
merger = ShellDataModelMerger()
# Assert
self.assertRaises(Exception, merger.merge_shell_model, datamodel, shell)
def test_adds_the_shell_model_to_the_datamodel(self):
datamodel = """<?xml version="1.0" encoding="utf-8"?>
<DataModelInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd"> # noqa: E501
<Attributes>
</Attributes>
<ResourceFamilies>
<ResourceFamily Name="Switch" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
</ResourceFamilies>
<DriverDescriptors />
<ScriptDescriptors />
</DataModelInfo>
"""
shell = """
<Shell>
<ShellAttributes>
</ShellAttributes>
<ShellModel Family="Switch">
<ResourceModel Name="SSwitch" Description="" SupportsConcurrentCommands="true"> # noqa: E501
<AttachedAttributes>
</AttachedAttributes>
<AttributeValues>
</AttributeValues>
<Drivers>
<DriverName>SSwitchDriver</DriverName>
</Drivers>
<Scripts />
</ResourceModel>
</ShellModel>
</Shell>
"""
merger = ShellDataModelMerger()
merged_xml = merger.merge_shell_model(datamodel, shell)
parser = etree.XMLParser(encoding="utf-8")
tree = etree.fromstring(merged_xml, parser)
self.assertIsNotNone(
tree.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}ResourceModel" # noqa: E501
),
"Model was not found in merged xml",
)
def test_adds_the_shell_model_to_the_target_family(self):
datamodel = """<?xml version="1.0" encoding="utf-8"?>
<DataModelInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns="http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd"> # noqa: E501
<Attributes>
</Attributes>
<ResourceFamilies>
<ResourceFamily Name="Bait" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
<ResourceFamily Name="Switch" Description="" IsSearchable="true" IsPowerSwitch="true">
<AttachedAttributes />
<AttributeValues />
<Models>
</Models>
<Categories />
</ResourceFamily>
</ResourceFamilies>
<DriverDescriptors />
<ScriptDescriptors />
</DataModelInfo>
"""
shell = """
<Shell>
<ShellAttributes>
</ShellAttributes>
<ShellModel Family="Switch">
<ResourceModel Name="SSwitch" Description="" SupportsConcurrentCommands="true"> # noqa: E501
<AttachedAttributes>
</AttachedAttributes>
<AttributeValues>
</AttributeValues>
<Drivers>
<DriverName>SSwitchDriver</DriverName>
</Drivers>
<Scripts />
</ResourceModel>
</ShellModel>
</Shell>
"""
merger = ShellDataModelMerger()
merged_xml = merger.merge_shell_model(datamodel, shell)
parser = etree.XMLParser(encoding="utf-8")
tree = etree.fromstring(merged_xml, parser)
family = tree.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}ResourceFamily[@Name='Switch']" # noqa: E501
)
self.assertIsNotNone(
family.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}ResourceModel[@Name='SSwitch']" # noqa: E501
),
"Model was not found in merged xml",
)
bait_family = tree.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}ResourceFamily[@Name='Bait']" # noqa: E501
)
self.assertIsNone(
bait_family.find(
".//{http://schemas.qualisystems.com/ResourceManagement/DataModelSchema.xsd}ResourceModel[@Name='SSwitch']" # noqa: E501
),
"Model was added to wrong element",
)
| apache-2.0 |
feelobot/compose | compose/config.py | 9 | 16232 | import logging
import os
import sys
import yaml
from collections import namedtuple
import six
from compose.cli.utils import find_candidates_in_parent_dirs
DOCKER_CONFIG_KEYS = [
'cap_add',
'cap_drop',
'cpu_shares',
'cpuset',
'command',
'detach',
'devices',
'dns',
'dns_search',
'domainname',
'entrypoint',
'env_file',
'environment',
'extra_hosts',
'hostname',
'image',
'labels',
'links',
'mac_address',
'mem_limit',
'memswap_limit',
'net',
'log_driver',
'log_opt',
'pid',
'ports',
'privileged',
'read_only',
'restart',
'security_opt',
'stdin_open',
'tty',
'user',
'volumes',
'volumes_from',
'working_dir',
]
ALLOWED_KEYS = DOCKER_CONFIG_KEYS + [
'build',
'container_name',
'dockerfile',
'expose',
'external_links',
'name',
]
DOCKER_CONFIG_HINTS = {
'cpu_share': 'cpu_shares',
'add_host': 'extra_hosts',
'hosts': 'extra_hosts',
'extra_host': 'extra_hosts',
'device': 'devices',
'link': 'links',
'memory_swap': 'memswap_limit',
'port': 'ports',
'privilege': 'privileged',
'priviliged': 'privileged',
'privilige': 'privileged',
'volume': 'volumes',
'workdir': 'working_dir',
}
SUPPORTED_FILENAMES = [
'docker-compose.yml',
'docker-compose.yaml',
'fig.yml',
'fig.yaml',
]
log = logging.getLogger(__name__)
ConfigDetails = namedtuple('ConfigDetails', 'config working_dir filename')
def find(base_dir, filename):
if filename == '-':
return ConfigDetails(yaml.safe_load(sys.stdin), os.getcwd(), None)
if filename:
filename = os.path.join(base_dir, filename)
else:
filename = get_config_path(base_dir)
return ConfigDetails(load_yaml(filename), os.path.dirname(filename), filename)
def get_config_path(base_dir):
(candidates, path) = find_candidates_in_parent_dirs(SUPPORTED_FILENAMES, base_dir)
if len(candidates) == 0:
raise ComposeFileNotFound(SUPPORTED_FILENAMES)
winner = candidates[0]
if len(candidates) > 1:
log.warn("Found multiple config files with supported names: %s", ", ".join(candidates))
log.warn("Using %s\n", winner)
if winner == 'docker-compose.yaml':
log.warn("Please be aware that .yml is the expected extension "
"in most cases, and using .yaml can cause compatibility "
"issues in future.\n")
if winner.startswith("fig."):
log.warn("%s is deprecated and will not be supported in future. "
"Please rename your config file to docker-compose.yml\n" % winner)
return os.path.join(path, winner)
def load(config_details):
dictionary, working_dir, filename = config_details
service_dicts = []
for service_name, service_dict in list(dictionary.items()):
if not isinstance(service_dict, dict):
raise ConfigurationError('Service "%s" doesn\'t have any configuration options. All top level keys in your docker-compose.yml must map to a dictionary of configuration options.' % service_name)
loader = ServiceLoader(working_dir=working_dir, filename=filename)
service_dict = loader.make_service_dict(service_name, service_dict)
validate_paths(service_dict)
service_dicts.append(service_dict)
return service_dicts
class ServiceLoader(object):
def __init__(self, working_dir, filename=None, already_seen=None):
self.working_dir = os.path.abspath(working_dir)
if filename:
self.filename = os.path.abspath(filename)
else:
self.filename = filename
self.already_seen = already_seen or []
def detect_cycle(self, name):
if self.signature(name) in self.already_seen:
raise CircularReference(self.already_seen + [self.signature(name)])
def make_service_dict(self, name, service_dict):
service_dict = service_dict.copy()
service_dict['name'] = name
service_dict = resolve_environment(service_dict, working_dir=self.working_dir)
service_dict = self.resolve_extends(service_dict)
return process_container_options(service_dict, working_dir=self.working_dir)
def resolve_extends(self, service_dict):
if 'extends' not in service_dict:
return service_dict
extends_options = self.validate_extends_options(service_dict['name'], service_dict['extends'])
if self.working_dir is None:
raise Exception("No working_dir passed to ServiceLoader()")
if 'file' in extends_options:
extends_from_filename = extends_options['file']
other_config_path = expand_path(self.working_dir, extends_from_filename)
else:
other_config_path = self.filename
other_working_dir = os.path.dirname(other_config_path)
other_already_seen = self.already_seen + [self.signature(service_dict['name'])]
other_loader = ServiceLoader(
working_dir=other_working_dir,
filename=other_config_path,
already_seen=other_already_seen,
)
other_config = load_yaml(other_config_path)
other_service_dict = other_config[extends_options['service']]
other_loader.detect_cycle(extends_options['service'])
other_service_dict = other_loader.make_service_dict(
service_dict['name'],
other_service_dict,
)
validate_extended_service_dict(
other_service_dict,
filename=other_config_path,
service=extends_options['service'],
)
return merge_service_dicts(other_service_dict, service_dict)
def signature(self, name):
return (self.filename, name)
def validate_extends_options(self, service_name, extends_options):
error_prefix = "Invalid 'extends' configuration for %s:" % service_name
if not isinstance(extends_options, dict):
raise ConfigurationError("%s must be a dictionary" % error_prefix)
if 'service' not in extends_options:
raise ConfigurationError(
"%s you need to specify a service, e.g. 'service: web'" % error_prefix
)
if 'file' not in extends_options and self.filename is None:
raise ConfigurationError(
"%s you need to specify a 'file', e.g. 'file: something.yml'" % error_prefix
)
for k, _ in extends_options.items():
if k not in ['file', 'service']:
raise ConfigurationError(
"%s unsupported configuration option '%s'" % (error_prefix, k)
)
return extends_options
def validate_extended_service_dict(service_dict, filename, service):
error_prefix = "Cannot extend service '%s' in %s:" % (service, filename)
if 'links' in service_dict:
raise ConfigurationError("%s services with 'links' cannot be extended" % error_prefix)
if 'volumes_from' in service_dict:
raise ConfigurationError("%s services with 'volumes_from' cannot be extended" % error_prefix)
if 'net' in service_dict:
if get_service_name_from_net(service_dict['net']) is not None:
raise ConfigurationError("%s services with 'net: container' cannot be extended" % error_prefix)
def process_container_options(service_dict, working_dir=None):
for k in service_dict:
if k not in ALLOWED_KEYS:
msg = "Unsupported config option for %s service: '%s'" % (service_dict['name'], k)
if k in DOCKER_CONFIG_HINTS:
msg += " (did you mean '%s'?)" % DOCKER_CONFIG_HINTS[k]
raise ConfigurationError(msg)
service_dict = service_dict.copy()
if 'memswap_limit' in service_dict and 'mem_limit' not in service_dict:
raise ConfigurationError("Invalid 'memswap_limit' configuration for %s service: when defining 'memswap_limit' you must set 'mem_limit' as well" % service_dict['name'])
if 'volumes' in service_dict:
service_dict['volumes'] = resolve_volume_paths(service_dict['volumes'], working_dir=working_dir)
if 'build' in service_dict:
service_dict['build'] = resolve_build_path(service_dict['build'], working_dir=working_dir)
if 'labels' in service_dict:
service_dict['labels'] = parse_labels(service_dict['labels'])
return service_dict
def merge_service_dicts(base, override):
d = base.copy()
if 'environment' in base or 'environment' in override:
d['environment'] = merge_environment(
base.get('environment'),
override.get('environment'),
)
path_mapping_keys = ['volumes', 'devices']
for key in path_mapping_keys:
if key in base or key in override:
d[key] = merge_path_mappings(
base.get(key),
override.get(key),
)
if 'labels' in base or 'labels' in override:
d['labels'] = merge_labels(
base.get('labels'),
override.get('labels'),
)
if 'image' in override and 'build' in d:
del d['build']
if 'build' in override and 'image' in d:
del d['image']
list_keys = ['ports', 'expose', 'external_links']
for key in list_keys:
if key in base or key in override:
d[key] = base.get(key, []) + override.get(key, [])
list_or_string_keys = ['dns', 'dns_search']
for key in list_or_string_keys:
if key in base or key in override:
d[key] = to_list(base.get(key)) + to_list(override.get(key))
already_merged_keys = ['environment', 'labels'] + path_mapping_keys + list_keys + list_or_string_keys
for k in set(ALLOWED_KEYS) - set(already_merged_keys):
if k in override:
d[k] = override[k]
return d
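# Illustrative merge behaviour (comment added for clarity, not in the original file):
#   base     = {'image': 'busybox', 'ports': ['8000'], 'environment': {'A': '1'}}
#   override = {'build': '.',       'ports': ['8001'], 'environment': {'B': '2'}}
#   merge_service_dicts(base, override)
#   -> {'build': '.', 'ports': ['8000', '8001'], 'environment': {'A': '1', 'B': '2'}}
#   ('image' is dropped because the override supplies 'build', and list-valued
#   keys such as 'ports' are concatenated rather than replaced.)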
def merge_environment(base, override):
env = parse_environment(base)
env.update(parse_environment(override))
return env
def parse_links(links):
return dict(parse_link(l) for l in links)
def parse_link(link):
if ':' in link:
source, alias = link.split(':', 1)
return (alias, source)
else:
return (link, link)
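# For example (added note): parse_link('db:database') -> ('database', 'db') and
# parse_link('db') -> ('db', 'db'); the resulting mapping is alias -> source.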
def get_env_files(options, working_dir=None):
if 'env_file' not in options:
return {}
if working_dir is None:
raise Exception("No working_dir passed to get_env_files()")
env_files = options.get('env_file', [])
if not isinstance(env_files, list):
env_files = [env_files]
return [expand_path(working_dir, path) for path in env_files]
def resolve_environment(service_dict, working_dir=None):
service_dict = service_dict.copy()
if 'environment' not in service_dict and 'env_file' not in service_dict:
return service_dict
env = {}
if 'env_file' in service_dict:
for f in get_env_files(service_dict, working_dir=working_dir):
env.update(env_vars_from_file(f))
del service_dict['env_file']
env.update(parse_environment(service_dict.get('environment')))
env = dict(resolve_env_var(k, v) for k, v in six.iteritems(env))
service_dict['environment'] = env
return service_dict
def parse_environment(environment):
if not environment:
return {}
if isinstance(environment, list):
return dict(split_env(e) for e in environment)
if isinstance(environment, dict):
return environment
raise ConfigurationError(
"environment \"%s\" must be a list or mapping," %
environment
)
def split_env(env):
if '=' in env:
return env.split('=', 1)
else:
return env, None
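# For example (added note): split_env('TERM=xterm') yields the pair 'TERM'/'xterm',
# while split_env('TERM') yields ('TERM', None); None values are later filled in
# from os.environ by resolve_env_var().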
def resolve_env_var(key, val):
if val is not None:
return key, val
elif key in os.environ:
return key, os.environ[key]
else:
return key, ''
def env_vars_from_file(filename):
"""
Read in a line delimited file of environment variables.
"""
if not os.path.exists(filename):
raise ConfigurationError("Couldn't find env file: %s" % filename)
env = {}
for line in open(filename, 'r'):
line = line.strip()
if line and not line.startswith('#'):
k, v = split_env(line)
env[k] = v
return env
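# For example (added note): a file containing the two lines
#   # defaults for local development
#   RACK_ENV=development
# yields {'RACK_ENV': 'development'}; blank lines and '#' comments are skipped.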
def resolve_volume_paths(volumes, working_dir=None):
if working_dir is None:
raise Exception("No working_dir passed to resolve_volume_paths()")
return [resolve_volume_path(v, working_dir) for v in volumes]
def resolve_volume_path(volume, working_dir):
container_path, host_path = split_path_mapping(volume)
container_path = os.path.expanduser(os.path.expandvars(container_path))
if host_path is not None:
host_path = os.path.expanduser(os.path.expandvars(host_path))
return "%s:%s" % (expand_path(working_dir, host_path), container_path)
else:
return container_path
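# For example (added note): resolve_volume_path('./data:/data', '/home/user/project')
# -> '/home/user/project/data:/data', while a bare container path such as
# '/var/lib/mysql' is returned unchanged.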
def resolve_build_path(build_path, working_dir=None):
if working_dir is None:
raise Exception("No working_dir passed to resolve_build_path")
return expand_path(working_dir, build_path)
def validate_paths(service_dict):
if 'build' in service_dict:
build_path = service_dict['build']
if not os.path.exists(build_path) or not os.access(build_path, os.R_OK):
raise ConfigurationError("build path %s either does not exist or is not accessible." % build_path)
def merge_path_mappings(base, override):
d = dict_from_path_mappings(base)
d.update(dict_from_path_mappings(override))
return path_mappings_from_dict(d)
def dict_from_path_mappings(path_mappings):
if path_mappings:
return dict(split_path_mapping(v) for v in path_mappings)
else:
return {}
def path_mappings_from_dict(d):
return [join_path_mapping(v) for v in d.items()]
def split_path_mapping(string):
if ':' in string:
(host, container) = string.split(':', 1)
return (container, host)
else:
return (string, None)
def join_path_mapping(pair):
(container, host) = pair
if host is None:
return container
else:
return ":".join((host, container))
def merge_labels(base, override):
labels = parse_labels(base)
labels.update(parse_labels(override))
return labels
def parse_labels(labels):
if not labels:
return {}
if isinstance(labels, list):
return dict(split_label(e) for e in labels)
if isinstance(labels, dict):
return labels
raise ConfigurationError(
"labels \"%s\" must be a list or mapping" %
labels
)
def split_label(label):
if '=' in label:
return label.split('=', 1)
else:
return label, ''
def expand_path(working_dir, path):
return os.path.abspath(os.path.join(working_dir, path))
def to_list(value):
if value is None:
return []
elif isinstance(value, six.string_types):
return [value]
else:
return value
def get_service_name_from_net(net_config):
if not net_config:
return
if not net_config.startswith('container:'):
return
_, net_name = net_config.split(':', 1)
return net_name
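# For example (added note): get_service_name_from_net('container:web') -> 'web';
# values such as 'bridge', 'host' or None return None because they reference no
# service container.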
def load_yaml(filename):
try:
with open(filename, 'r') as fh:
return yaml.safe_load(fh)
except IOError as e:
raise ConfigurationError(six.text_type(e))
class ConfigurationError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return self.msg
class CircularReference(ConfigurationError):
def __init__(self, trail):
self.trail = trail
@property
def msg(self):
lines = [
"{} in {}".format(service_name, filename)
for (filename, service_name) in self.trail
]
return "Circular reference:\n {}".format("\n extends ".join(lines))
class ComposeFileNotFound(ConfigurationError):
def __init__(self, supported_filenames):
super(ComposeFileNotFound, self).__init__("""
Can't find a suitable configuration file in this directory or any parent. Are you in the right directory?
Supported filenames: %s
""" % ", ".join(supported_filenames))
| apache-2.0 |
sgraham/nope | build/android/pylib/utils/timeout_retry.py | 52 | 5301 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility to run functions with timeouts and retries."""
# pylint: disable=W0702
import logging
import threading
import time
import traceback
from pylib.utils import reraiser_thread
from pylib.utils import watchdog_timer
class TimeoutRetryThread(reraiser_thread.ReraiserThread):
def __init__(self, func, timeout, name):
super(TimeoutRetryThread, self).__init__(func, name=name)
self._watcher = watchdog_timer.WatchdogTimer(timeout)
self._expired = False
def GetWatcher(self):
"""Returns the watchdog keeping track of this thread's time."""
return self._watcher
def GetElapsedTime(self):
return self._watcher.GetElapsed()
def GetRemainingTime(self, required=0, msg=None):
"""Get the remaining time before the thread times out.
Useful to send as the |timeout| parameter of async IO operations.
Args:
required: minimum amount of time that will be required to complete, e.g.,
some sleep or IO operation.
msg: error message to show if timing out.
Returns:
The number of seconds remaining before the thread times out, or None
if the thread never times out.
Raises:
reraiser_thread.TimeoutError if the remaining time is less than the
required time.
"""
remaining = self._watcher.GetRemaining()
if remaining is not None and remaining < required:
if msg is None:
msg = 'Timeout expired'
if remaining > 0:
msg += (', wait of %.1f secs required but only %.1f secs left'
% (required, remaining))
self._expired = True
raise reraiser_thread.TimeoutError(msg)
return remaining
def LogTimeoutException(self):
"""Log the exception that terminated this thread."""
if not self._expired:
return
logging.critical('*' * 80)
logging.critical('%s on thread %r', self._exc_info[0].__name__, self.name)
logging.critical('*' * 80)
fmt_exc = ''.join(traceback.format_exception(*self._exc_info))
for line in fmt_exc.splitlines():
logging.critical(line.rstrip())
logging.critical('*' * 80)
def CurrentTimeoutThread():
"""Get the current thread if it is a TimeoutRetryThread.
Returns:
The current thread if it is a TimeoutRetryThread, otherwise None.
"""
current_thread = threading.current_thread()
if isinstance(current_thread, TimeoutRetryThread):
return current_thread
else:
return None
def WaitFor(condition, wait_period=5, max_tries=None):
"""Wait for a condition to become true.
Repeatedly call the function condition(), with no arguments, until it returns
a true value.
If called within a TimeoutRetryThread, it cooperates nicely with it.
Args:
condition: function with the condition to check
wait_period: number of seconds to wait before retrying to check the
condition
max_tries: maximum number of checks to make, the default tries forever
or until the TimeoutRetryThread expires.
Returns:
The true value returned by the condition, or None if the condition was
not met after max_tries.
Raises:
reraiser_thread.TimeoutError if the current thread is a TimeoutRetryThread
and the timeout expires.
"""
condition_name = condition.__name__
timeout_thread = CurrentTimeoutThread()
while max_tries is None or max_tries > 0:
result = condition()
if max_tries is not None:
max_tries -= 1
msg = ['condition', repr(condition_name), 'met' if result else 'not met']
if timeout_thread:
msg.append('(%.1fs)' % timeout_thread.GetElapsedTime())
logging.info(' '.join(msg))
if result:
return result
if timeout_thread:
timeout_thread.GetRemainingTime(wait_period,
msg='Timed out waiting for %r' % condition_name)
time.sleep(wait_period)
return None
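# Illustrative usage (comment only; |marker_path| and the os import are assumed,
# not part of this module):
#
#   def marker_file_written():
#     return os.path.exists(marker_path)
#
#   WaitFor(marker_file_written, wait_period=1, max_tries=30)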
def Run(func, timeout, retries, args=None, kwargs=None):
"""Runs the passed function in a separate thread with timeouts and retries.
Args:
func: the function to be wrapped.
timeout: the timeout in seconds for each try.
retries: the number of retries.
args: list of positional args to pass to |func|.
kwargs: dictionary of keyword args to pass to |func|.
Returns:
The return value of func(*args, **kwargs).
"""
if not args:
args = []
if not kwargs:
kwargs = {}
# The return value uses a list because Python variables are references, not
# values. Closures make a copy of the reference, so updating the closure's
# reference wouldn't update where the original reference pointed.
ret = [None]
def RunOnTimeoutThread():
ret[0] = func(*args, **kwargs)
num_try = 1
while True:
child_thread = TimeoutRetryThread(
RunOnTimeoutThread, timeout,
name='TimeoutThread-%d-for-%s' % (num_try,
threading.current_thread().name))
try:
thread_group = reraiser_thread.ReraiserThreadGroup([child_thread])
thread_group.StartAll()
thread_group.JoinAll(child_thread.GetWatcher())
return ret[0]
except:
child_thread.LogTimeoutException()
if num_try > retries:
raise
num_try += 1
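# Illustrative usage (comment only; |FlakyInstall|, |device| and |apk_path| are
# assumed names, not part of this module):
#
#   Run(FlakyInstall, timeout=300, retries=2, args=[device, apk_path])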
| bsd-3-clause |
pythonprobr/pythonpro-website | pythonpro/memberkit/management/commands/create_subscriptions_for_roles.py | 1 | 1424 | from django.contrib.auth import get_user_model
from django.core.management.base import BaseCommand
from pythonpro.memberkit.models import Subscription
class Command(BaseCommand):
help = 'Cria Assinatura para usuários sem pagamentos'
def add_arguments(self, parser):
pass
def handle(self, *args, **options):
role_to_subscription_type_dct = {
'data_scientist': 4456,
'webdev': 4426,
'member': 4424,
'bootcamper': 4423,
'pythonista': 4423,
'client': 4420,
}
for role, subscription_type_id in role_to_subscription_type_dct.items():
users_with_role = get_user_model().objects.filter(
groups__name=role
).exclude(subscriptions__subscription_types__id=subscription_type_id).only('id')
for user in users_with_role:
self.stdout.write(self.style.SUCCESS(f'Processando usuário {user} com papel {role}'))
subscription = Subscription.objects.create(
subscriber_id=user.id,
status=Subscription.Status.INACTIVE,
observation='Assinatura sem pagamento criada automaticamente por comando no servidor'
)
subscription.subscription_types.set([subscription_type_id])
self.stdout.write(self.style.SUCCESS(f'Criada {subscription}'))
| agpl-3.0 |
nvoron23/scikit-learn | sklearn/preprocessing/_function_transformer.py | 163 | 2407 | from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array
def _identity(X):
"""The identity function.
"""
return X
class FunctionTransformer(BaseEstimator, TransformerMixin):
"""Constructs a transformer from an arbitrary callable.
A FunctionTransformer forwards its X (and optionally y) arguments to a
user-defined function or function object and returns the result of this
function. This is useful for stateless transformations such as taking the
log of frequencies, doing custom scaling, etc.
A FunctionTransformer will not do any checks on its function's output.
Note: If a lambda is used as the function, then the resulting
transformer will not be pickleable.
Parameters
----------
func : callable, optional default=None
The callable to use for the transformation. This will be passed
the same arguments as transform, with args and kwargs forwarded.
If func is None, then func will be the identity function.
validate : bool, optional default=True
Indicate that the input X array should be checked before calling
func. If validate is false, there will be no input validation.
If it is true, then X will be converted to a 2-dimensional NumPy
array or sparse matrix. If this conversion is not possible or X
contains NaN or infinity, an exception is raised.
accept_sparse : boolean, optional
Indicate that func accepts a sparse matrix as input. If validate is
False, this has no effect. Otherwise, if accept_sparse is false,
sparse matrix inputs will cause an exception to be raised.
pass_y : bool, optional default=False
Indicate that transform should forward the y argument to the
inner callable.
"""
def __init__(self, func=None, validate=True,
accept_sparse=False, pass_y=False):
self.func = func
self.validate = validate
self.accept_sparse = accept_sparse
self.pass_y = pass_y
def fit(self, X, y=None):
if self.validate:
check_array(X, self.accept_sparse)
return self
def transform(self, X, y=None):
if self.validate:
X = check_array(X, self.accept_sparse)
func = self.func if self.func is not None else _identity
return func(X, *((y,) if self.pass_y else ()))
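# Illustrative usage (comment appended here, not part of the scikit-learn source):
#   >>> import numpy as np
#   >>> from sklearn.preprocessing import FunctionTransformer
#   >>> FunctionTransformer(np.log1p).transform(np.array([[0, 1], [2, 3]]))
#   # -> log1p applied element-wise: [[0., 0.693...], [1.099..., 1.386...]]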
| bsd-3-clause |
beni55/networkx | networkx/readwrite/graphml.py | 10 | 21265 | """
*******
GraphML
*******
Read and write graphs in GraphML format.
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"GraphML is a comprehensive and easy-to-use file format for graphs. It
consists of a language core to describe the structural properties of a
graph and a flexible extension mechanism to add application-specific
data. Its main features include support of
* directed, undirected, and mixed graphs,
* hypergraphs,
* hierarchical graphs,
* graphical representations,
* references to external data,
* application-specific attribute data, and
* light-weight parsers.
Unlike many other file formats for graphs, GraphML does not use a
custom syntax. Instead, it is based on XML and hence ideally suited as
a common denominator for all kinds of services generating, archiving,
or processing graphs."
http://graphml.graphdrawing.org/
Format
------
GraphML is an XML format. See
http://graphml.graphdrawing.org/specification.html for the specification and
http://graphml.graphdrawing.org/primer/graphml-primer.html
for examples.
"""
__author__ = """\n""".join(['Salim Fadhley',
'Aric Hagberg ([email protected])'
])
__all__ = ['write_graphml', 'read_graphml', 'generate_graphml',
'parse_graphml', 'GraphMLWriter', 'GraphMLReader']
import networkx as nx
from networkx.utils import open_file, make_str
import warnings
try:
from xml.etree.cElementTree import Element, ElementTree, tostring, fromstring
except ImportError:
try:
from xml.etree.ElementTree import Element, ElementTree, tostring, fromstring
except ImportError:
pass
@open_file(1,mode='wb')
def write_graphml(G, path, encoding='utf-8',prettyprint=True):
"""Write G in GraphML XML format to path
Parameters
----------
G : graph
A networkx graph
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> nx.write_graphml(G, "test.graphml")
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
writer.dump(path)
def generate_graphml(G, encoding='utf-8',prettyprint=True):
"""Generate GraphML lines for G
Parameters
----------
G : graph
A networkx graph
encoding : string (optional)
Encoding for text data.
prettyprint : bool (optional)
If True use line breaks and indenting in output XML.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_graphml(G)) # doctest: +SKIP
>>> for line in nx.generate_graphml(G): # doctest: +SKIP
... print(line)
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together), hyperedges, nested graphs, or ports.
"""
writer = GraphMLWriter(encoding=encoding,prettyprint=prettyprint)
writer.add_graph_element(G)
for line in str(writer).splitlines():
yield line
@open_file(0,mode='rb')
def read_graphml(path,node_type=str):
"""Read graph in GraphML format from path.
Parameters
----------
path : file or string
File or filename to write.
Filenames ending in .gz or .bz2 will be compressed.
node_type: Python type (default: str)
Convert node ids to this type
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
key. If not specified then the "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
Files with the yEd "yfiles" extension can be read but the graphics
information is discarded.
yEd compressed files ("file.graphmlz" extension) can be read by renaming
the file to "file.graphml.gz".
"""
reader = GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(path=path))
return glist[0]
def parse_graphml(graphml_string,node_type=str):
"""Read graph in GraphML format from string.
Parameters
----------
graphml_string : string
String containing graphml information
(e.g., contents of a graphml file).
node_type: Python type (default: str)
Convert node ids to this type
Returns
-------
graph: NetworkX graph
If no parallel edges are found a Graph or DiGraph is returned.
Otherwise a MultiGraph or MultiDiGraph is returned.
Examples
--------
>>> G=nx.path_graph(4)
>>> linefeed=chr(10) # linefeed=\n
>>> s=linefeed.join(nx.generate_graphml(G))
>>> H=nx.parse_graphml(s)
Notes
-----
This implementation does not support mixed graphs (directed and undirected
edges together), hypergraphs, nested graphs, or ports.
For multigraphs the GraphML edge "id" will be used as the edge
key. If not specified then the "key" attribute will be used. If
there is no "key" attribute a default NetworkX multigraph edge key
will be provided.
"""
reader = GraphMLReader(node_type=node_type)
# need to check for multiple graphs
glist=list(reader(string=graphml_string))
return glist[0]
class GraphML(object):
NS_GRAPHML = "http://graphml.graphdrawing.org/xmlns"
NS_XSI = "http://www.w3.org/2001/XMLSchema-instance"
#xmlns:y="http://www.yworks.com/xml/graphml"
NS_Y = "http://www.yworks.com/xml/graphml"
SCHEMALOCATION = \
' '.join(['http://graphml.graphdrawing.org/xmlns',
'http://graphml.graphdrawing.org/xmlns/1.0/graphml.xsd'])
try:
chr(12345) # Fails on Py<3.
unicode = str # Py3k's str is our unicode type
long = int # Py3K's int is our long type
except ValueError:
# Python 2.x
pass
types=[(int,"integer"), # for Gephi GraphML bug
(str,"yfiles"),(str,"string"), (unicode,"string"),
(int,"int"), (long,"long"),
(float,"float"), (float,"double"),
(bool, "boolean")]
xml_type = dict(types)
python_type = dict(reversed(a) for a in types)
# http://www.w3.org/TR/xmlschema-2/#boolean
convert_bool = {
'true': True, 'false': False,
'True': True, 'False': False,
'0': False, 0: False,
'1': True, 1: True
}
class GraphMLWriter(GraphML):
def __init__(self, graph=None, encoding="utf-8",prettyprint=True):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML writer requires '
'xml.elementtree.ElementTree')
self.prettyprint=prettyprint
self.encoding = encoding
self.xml = Element("graphml",
{'xmlns':self.NS_GRAPHML,
'xmlns:xsi':self.NS_XSI,
'xsi:schemaLocation':self.SCHEMALOCATION}
)
self.keys={}
if graph is not None:
self.add_graph_element(graph)
def __str__(self):
if self.prettyprint:
self.indent(self.xml)
s=tostring(self.xml).decode(self.encoding)
return s
def get_key(self, name, attr_type, scope, default):
keys_key = (name, attr_type, scope)
try:
return self.keys[keys_key]
except KeyError:
new_id = "d%i" % len(list(self.keys))
self.keys[keys_key] = new_id
key_kwargs = {"id":new_id,
"for":scope,
"attr.name":name,
"attr.type":attr_type}
key_element=Element("key",**key_kwargs)
# add subelement for data default value if present
if default is not None:
default_element=Element("default")
default_element.text=make_str(default)
key_element.append(default_element)
self.xml.insert(0,key_element)
return new_id
def add_data(self, name, element_type, value,
scope="all",
default=None):
"""
Make a data element for an edge or a node. Keep a log of the
type in the keys table.
"""
if element_type not in self.xml_type:
raise nx.NetworkXError('GraphML writer does not support '
'%s as data values.'%element_type)
key_id = self.get_key(name, self.xml_type[element_type], scope, default)
data_element = Element("data", key=key_id)
data_element.text = make_str(value)
return data_element
def add_attributes(self, scope, xml_obj, data, default):
"""Appends attributes to edges or nodes.
"""
for k,v in data.items():
default_value=default.get(k)
obj=self.add_data(make_str(k), type(v), make_str(v),
scope=scope, default=default_value)
xml_obj.append(obj)
def add_nodes(self, G, graph_element):
for node,data in G.nodes(data=True):
node_element = Element("node", id = make_str(node))
default=G.graph.get('node_default',{})
self.add_attributes("node", node_element, data, default)
graph_element.append(node_element)
def add_edges(self, G, graph_element):
if G.is_multigraph():
for u,v,key,data in G.edges(data=True,keys=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
self.add_attributes("edge", edge_element,
{'key':key}, default)
graph_element.append(edge_element)
else:
for u,v,data in G.edges(data=True):
edge_element = Element("edge",source=make_str(u),
target=make_str(v))
default=G.graph.get('edge_default',{})
self.add_attributes("edge", edge_element, data, default)
graph_element.append(edge_element)
def add_graph_element(self, G):
"""
Serialize graph G in GraphML to the stream.
"""
if G.is_directed():
default_edge_type='directed'
else:
default_edge_type='undirected'
graphid=G.graph.pop('id',None)
if graphid is None:
graph_element = Element("graph",
edgedefault = default_edge_type)
else:
graph_element = Element("graph",
edgedefault = default_edge_type,
id=graphid)
default={}
data=dict((k,v) for (k,v) in G.graph.items()
if k not in ['node_default','edge_default'])
self.add_attributes("graph", graph_element, data, default)
self.add_nodes(G,graph_element)
self.add_edges(G,graph_element)
self.xml.append(graph_element)
def add_graphs(self, graph_list):
"""
Add many graphs to this GraphML document.
"""
for G in graph_list:
self.add_graph_element(G)
def dump(self, stream):
if self.prettyprint:
self.indent(self.xml)
document = ElementTree(self.xml)
document.write(stream, encoding=self.encoding, xml_declaration=True)
def indent(self, elem, level=0):
# in-place prettyprint formatter
i = "\n" + level*" "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level+1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
class GraphMLReader(GraphML):
"""Read a GraphML document. Produces NetworkX graph objects.
"""
def __init__(self, node_type=str):
try:
import xml.etree.ElementTree
except ImportError:
raise ImportError('GraphML reader requires '
'xml.elementtree.ElementTree')
self.node_type=node_type
self.multigraph=False # assume multigraph and test for parallel edges
def __call__(self, path=None, string=None):
if path is not None:
self.xml = ElementTree(file=path)
elif string is not None:
self.xml = fromstring(string)
else:
raise ValueError("Must specify either 'path' or 'string' as kwarg.")
(keys,defaults) = self.find_graphml_keys(self.xml)
for g in self.xml.findall("{%s}graph" % self.NS_GRAPHML):
yield self.make_graph(g, keys, defaults)
def make_graph(self, graph_xml, graphml_keys, defaults):
# set default graph type
edgedefault = graph_xml.get("edgedefault", None)
if edgedefault=='directed':
G=nx.MultiDiGraph()
else:
G=nx.MultiGraph()
# set defaults for graph attributes
G.graph['node_default']={}
G.graph['edge_default']={}
for key_id,value in defaults.items():
key_for=graphml_keys[key_id]['for']
name=graphml_keys[key_id]['name']
python_type=graphml_keys[key_id]['type']
if key_for=='node':
G.graph['node_default'].update({name:python_type(value)})
if key_for=='edge':
G.graph['edge_default'].update({name:python_type(value)})
# hyperedges are not supported
hyperedge=graph_xml.find("{%s}hyperedge" % self.NS_GRAPHML)
if hyperedge is not None:
raise nx.NetworkXError("GraphML reader does not support hyperedges")
# add nodes
for node_xml in graph_xml.findall("{%s}node" % self.NS_GRAPHML):
self.add_node(G, node_xml, graphml_keys)
# add edges
for edge_xml in graph_xml.findall("{%s}edge" % self.NS_GRAPHML):
self.add_edge(G, edge_xml, graphml_keys)
# add graph data
data = self.decode_data_elements(graphml_keys, graph_xml)
G.graph.update(data)
# switch to Graph or DiGraph if no parallel edges were found.
if not self.multigraph:
if G.is_directed():
return nx.DiGraph(G)
else:
return nx.Graph(G)
else:
return G
def add_node(self, G, node_xml, graphml_keys):
"""Add a node to the graph.
"""
# warn on finding unsupported ports tag
ports=node_xml.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# find the node by id and cast it to the appropriate type
node_id = self.node_type(node_xml.get("id"))
# get data/attributes for node
data = self.decode_data_elements(graphml_keys, node_xml)
G.add_node(node_id, data)
def add_edge(self, G, edge_element, graphml_keys):
"""Add an edge to the graph.
"""
# warn on finding unsupported ports tag
ports=edge_element.find("{%s}port" % self.NS_GRAPHML)
if ports is not None:
warnings.warn("GraphML port tag not supported.")
# raise error if we find mixed directed and undirected edges
directed = edge_element.get("directed")
if G.is_directed() and directed=='false':
raise nx.NetworkXError(\
"directed=false edge found in directed graph.")
if (not G.is_directed()) and directed=='true':
raise nx.NetworkXError(\
"directed=true edge found in undirected graph.")
source = self.node_type(edge_element.get("source"))
target = self.node_type(edge_element.get("target"))
data = self.decode_data_elements(graphml_keys, edge_element)
# GraphML stores edge ids as an attribute
# NetworkX uses them as keys in multigraphs too if no key
# attribute is specified
edge_id = edge_element.get("id")
if edge_id:
data["id"] = edge_id
if G.has_edge(source,target):
# mark this as a multigraph
self.multigraph=True
if edge_id is None:
# no id specified, try using 'key' attribute as id
edge_id=data.pop('key',None)
G.add_edge(source, target, key=edge_id, **data)
def decode_data_elements(self, graphml_keys, obj_xml):
"""Use the key information to decode the data XML if present."""
data = {}
for data_element in obj_xml.findall("{%s}data" % self.NS_GRAPHML):
key = data_element.get("key")
try:
data_name=graphml_keys[key]['name']
data_type=graphml_keys[key]['type']
except KeyError:
raise nx.NetworkXError("Bad GraphML data: no key %s"%key)
text=data_element.text
# assume anything with subelements is a yfiles extension
if text is not None and len(list(data_element))==0:
if data_type==bool:
data[data_name] = self.convert_bool[text]
else:
data[data_name] = data_type(text)
elif len(list(data_element)) > 0:
# Assume yfiles as subelements, try to extract node_label
node_label = None
for node_type in ['ShapeNode', 'SVGNode', 'ImageNode']:
geometry = data_element.find("{%s}%s/{%s}Geometry" %
(self.NS_Y, node_type, self.NS_Y))
if geometry is not None:
data['x'] = geometry.get('x')
data['y'] = geometry.get('y')
if node_label is None:
node_label = data_element.find("{%s}%s/{%s}NodeLabel" %
(self.NS_Y, node_type, self.NS_Y))
if node_label is not None:
data['label'] = node_label.text
# check all the different types of edges available in yEd.
for e in ['PolyLineEdge', 'SplineEdge', 'QuadCurveEdge', 'BezierEdge', 'ArcEdge']:
edge_label = data_element.find("{%s}%s/{%s}EdgeLabel"%
(self.NS_Y, e, (self.NS_Y)))
if edge_label is not None:
break
if edge_label is not None:
data['label'] = edge_label.text
return data
def find_graphml_keys(self, graph_element):
"""Extracts all the keys and key defaults from the xml.
"""
graphml_keys = {}
graphml_key_defaults = {}
for k in graph_element.findall("{%s}key" % self.NS_GRAPHML):
attr_id = k.get("id")
attr_type=k.get('attr.type')
attr_name=k.get("attr.name")
yfiles_type=k.get("yfiles.type")
if yfiles_type is not None:
attr_name = yfiles_type
attr_type = 'yfiles'
if attr_type is None:
attr_type = "string"
warnings.warn("No key type for id %s. Using string"%attr_id)
if attr_name is None:
raise nx.NetworkXError("Unknown key for id %s in file."%attr_id)
graphml_keys[attr_id] = {
"name":attr_name,
"type":self.python_type[attr_type],
"for":k.get("for")}
# check for "default" subelement of key element
default=k.find("{%s}default" % self.NS_GRAPHML)
if default is not None:
graphml_key_defaults[attr_id]=default.text
return graphml_keys,graphml_key_defaults
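# Illustrative usage sketch (not part of the original module): the reader above
# is a callable that yields one NetworkX graph per <graph> element in the
# document. The input file name is hypothetical.
def _example_read_graphml(path='example.graphml'):
    reader = GraphMLReader(node_type=str)
    return list(reader(path=path))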
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import xml.etree.ElementTree
except:
raise SkipTest("xml.etree.ElementTree not available")
# fixture for nose tests
def teardown_module(module):
import os
try:
os.unlink('test.graphml')
except:
pass
| bsd-3-clause |
isandlaTech/cohorte-3rdparty | unidecode/src/main/python/unidecode/x00b.py | 252 | 4132 | data = (
'[?]', # 0x00
'N', # 0x01
'N', # 0x02
'H', # 0x03
'[?]', # 0x04
'a', # 0x05
'aa', # 0x06
'i', # 0x07
'ii', # 0x08
'u', # 0x09
'uu', # 0x0a
'R', # 0x0b
'L', # 0x0c
'[?]', # 0x0d
'[?]', # 0x0e
'e', # 0x0f
'ai', # 0x10
'[?]', # 0x11
'[?]', # 0x12
'o', # 0x13
'au', # 0x14
'k', # 0x15
'kh', # 0x16
'g', # 0x17
'gh', # 0x18
'ng', # 0x19
'c', # 0x1a
'ch', # 0x1b
'j', # 0x1c
'jh', # 0x1d
'ny', # 0x1e
'tt', # 0x1f
'tth', # 0x20
'dd', # 0x21
'ddh', # 0x22
'nn', # 0x23
't', # 0x24
'th', # 0x25
'd', # 0x26
'dh', # 0x27
'n', # 0x28
'[?]', # 0x29
'p', # 0x2a
'ph', # 0x2b
'b', # 0x2c
'bh', # 0x2d
'm', # 0x2e
'y', # 0x2f
'r', # 0x30
'[?]', # 0x31
'l', # 0x32
'll', # 0x33
'[?]', # 0x34
'', # 0x35
'sh', # 0x36
'ss', # 0x37
's', # 0x38
'h', # 0x39
'[?]', # 0x3a
'[?]', # 0x3b
'\'', # 0x3c
'\'', # 0x3d
'aa', # 0x3e
'i', # 0x3f
'ii', # 0x40
'u', # 0x41
'uu', # 0x42
'R', # 0x43
'[?]', # 0x44
'[?]', # 0x45
'[?]', # 0x46
'e', # 0x47
'ai', # 0x48
'[?]', # 0x49
'[?]', # 0x4a
'o', # 0x4b
'au', # 0x4c
'', # 0x4d
'[?]', # 0x4e
'[?]', # 0x4f
'[?]', # 0x50
'[?]', # 0x51
'[?]', # 0x52
'[?]', # 0x53
'[?]', # 0x54
'[?]', # 0x55
'+', # 0x56
'+', # 0x57
'[?]', # 0x58
'[?]', # 0x59
'[?]', # 0x5a
'[?]', # 0x5b
'rr', # 0x5c
'rh', # 0x5d
'[?]', # 0x5e
'yy', # 0x5f
'RR', # 0x60
'LL', # 0x61
'[?]', # 0x62
'[?]', # 0x63
'[?]', # 0x64
'[?]', # 0x65
'0', # 0x66
'1', # 0x67
'2', # 0x68
'3', # 0x69
'4', # 0x6a
'5', # 0x6b
'6', # 0x6c
'7', # 0x6d
'8', # 0x6e
'9', # 0x6f
'', # 0x70
'[?]', # 0x71
'[?]', # 0x72
'[?]', # 0x73
'[?]', # 0x74
'[?]', # 0x75
'[?]', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
'[?]', # 0x80
'[?]', # 0x81
'N', # 0x82
'H', # 0x83
'[?]', # 0x84
'a', # 0x85
'aa', # 0x86
'i', # 0x87
'ii', # 0x88
'u', # 0x89
'uu', # 0x8a
'[?]', # 0x8b
'[?]', # 0x8c
'[?]', # 0x8d
'e', # 0x8e
'ee', # 0x8f
'ai', # 0x90
'[?]', # 0x91
'o', # 0x92
'oo', # 0x93
'au', # 0x94
'k', # 0x95
'[?]', # 0x96
'[?]', # 0x97
'[?]', # 0x98
'ng', # 0x99
'c', # 0x9a
'[?]', # 0x9b
'j', # 0x9c
'[?]', # 0x9d
'ny', # 0x9e
'tt', # 0x9f
'[?]', # 0xa0
'[?]', # 0xa1
'[?]', # 0xa2
'nn', # 0xa3
't', # 0xa4
'[?]', # 0xa5
'[?]', # 0xa6
'[?]', # 0xa7
'n', # 0xa8
'nnn', # 0xa9
'p', # 0xaa
'[?]', # 0xab
'[?]', # 0xac
'[?]', # 0xad
'm', # 0xae
'y', # 0xaf
'r', # 0xb0
'rr', # 0xb1
'l', # 0xb2
'll', # 0xb3
'lll', # 0xb4
'v', # 0xb5
'[?]', # 0xb6
'ss', # 0xb7
's', # 0xb8
'h', # 0xb9
'[?]', # 0xba
'[?]', # 0xbb
'[?]', # 0xbc
'[?]', # 0xbd
'aa', # 0xbe
'i', # 0xbf
'ii', # 0xc0
'u', # 0xc1
'uu', # 0xc2
'[?]', # 0xc3
'[?]', # 0xc4
'[?]', # 0xc5
'e', # 0xc6
'ee', # 0xc7
'ai', # 0xc8
'[?]', # 0xc9
'o', # 0xca
'oo', # 0xcb
'au', # 0xcc
'', # 0xcd
'[?]', # 0xce
'[?]', # 0xcf
'[?]', # 0xd0
'[?]', # 0xd1
'[?]', # 0xd2
'[?]', # 0xd3
'[?]', # 0xd4
'[?]', # 0xd5
'[?]', # 0xd6
'+', # 0xd7
'[?]', # 0xd8
'[?]', # 0xd9
'[?]', # 0xda
'[?]', # 0xdb
'[?]', # 0xdc
'[?]', # 0xdd
'[?]', # 0xde
'[?]', # 0xdf
'[?]', # 0xe0
'[?]', # 0xe1
'[?]', # 0xe2
'[?]', # 0xe3
'[?]', # 0xe4
'[?]', # 0xe5
'0', # 0xe6
'1', # 0xe7
'2', # 0xe8
'3', # 0xe9
'4', # 0xea
'5', # 0xeb
'6', # 0xec
'7', # 0xed
'8', # 0xee
'9', # 0xef
'+10+', # 0xf0
'+100+', # 0xf1
'+1000+', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| apache-2.0 |
angstwad/ansible | lib/ansible/module_utils/netcmd.py | 3 | 6931 | # This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2015 Peter Sprygada, <[email protected]>
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import re
import time
import itertools
import shlex
from ansible.module_utils.basic import BOOLEANS_TRUE, BOOLEANS_FALSE
class Conditional(object):
"""Used in command modules to evaluate waitfor conditions
"""
OPERATORS = {
'eq': ['eq', '=='],
'neq': ['neq', 'ne', '!='],
'gt': ['gt', '>'],
'ge': ['ge', '>='],
'lt': ['lt', '<'],
'le': ['le', '<='],
'contains': ['contains'],
'matches': ['matches']
}
def __init__(self, conditional, encoding='json'):
self.raw = conditional
self.encoding = encoding
key, op, val = shlex.split(conditional)
self.key = key
self.func = self.func(op)
self.value = self._cast_value(val)
def __call__(self, data):
value = self.get_value(dict(result=data))
return self.func(value)
def _cast_value(self, value):
if value in BOOLEANS_TRUE:
return True
elif value in BOOLEANS_FALSE:
return False
elif re.match(r'^\d+\.\d+$', value):
return float(value)
elif re.match(r'^\d+$', value):
return int(value)
else:
return unicode(value)
def func(self, oper):
for func, operators in self.OPERATORS.items():
if oper in operators:
return getattr(self, func)
raise AttributeError('unknown operator: %s' % oper)
def get_value(self, result):
if self.encoding in ['json', 'text']:
return self.get_json(result)
elif self.encoding == 'xml':
return self.get_xml(result.get('result'))
def get_xml(self, result):
parts = self.key.split('.')
value_index = None
match = re.match(r'^\S+(\[)(\d+)\]', parts[-1])
if match:
start, end = match.regs[1]
parts[-1] = parts[-1][0:start]
value_index = int(match.group(2))
path = '/'.join(parts[1:])
path = '/%s' % path
path += '/text()'
index = int(re.match(r'result\[(\d+)\]', parts[0]).group(1))
values = result[index].xpath(path)
if value_index is not None:
return values[value_index].strip()
return [v.strip() for v in values]
def get_json(self, result):
parts = re.split(r'\.(?=[^\]]*(?:\[|$))', self.key)
for part in parts:
match = re.findall(r'\[(\S+?)\]', part)
if match:
key = part[:part.find('[')]
result = result[key]
for m in match:
try:
m = int(m)
except ValueError:
m = str(m)
result = result[m]
else:
result = result.get(part)
return result
def number(self, value):
if '.' in str(value):
return float(value)
else:
return int(value)
def eq(self, value):
return value == self.value
def neq(self, value):
return value != self.value
def gt(self, value):
return self.number(value) > self.value
def ge(self, value):
return self.number(value) >= self.value
def lt(self, value):
return self.number(value) < self.value
def le(self, value):
return self.number(value) <= self.value
def contains(self, value):
return str(self.value) in value
def matches(self, value):
match = re.search(self.value, value, re.M)
return match is not None
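# Illustrative usage sketch (not part of the original module): a Conditional is
# built from a "<key> <operator> <value>" expression and then called with the
# list of command responses; the key is resolved against dict(result=<responses>).
def _example_conditional():
    cond = Conditional('result[0] contains Ethernet')
    return cond(['interface Ethernet0/1 is up'])  # evaluates to True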
class FailedConditionsError(Exception):
def __init__(self, msg, failed_conditions):
super(FailedConditionsError, self).__init__(msg)
self.failed_conditions = failed_conditions
class CommandRunner(object):
def __init__(self, module):
self.module = module
self.items = list()
self.conditionals = set()
self.retries = 10
self.interval = 1
self._cache = dict()
def add_command(self, command, output=None):
self.module.cli.add_commands(command, output=output)
def get_command(self, command):
try:
cmdobj = self._cache[command]
return cmdobj.response
except KeyError:
for cmd in self.module.cli.commands:
if str(cmd) == command:
self._cache[command] = cmd
return cmd.response
raise ValueError("command '%s' not found" % command)
def add_conditional(self, condition):
self.conditionals.add(Conditional(condition))
def run_commands(self):
responses = self.module.cli.run_commands()
self.items = responses
def run(self):
while self.retries > 0:
self.run_commands()
for item in list(self.conditionals):
if item(self.items):
self.conditionals.remove(item)
if not self.conditionals:
break
time.sleep(self.interval)
self.retries -= 1
else:
failed_conditions = [item.raw for item in self.conditionals]
raise FailedConditionsError('timeout waiting for value', failed_conditions)
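# Illustrative usage sketch (not part of the original module): CommandRunner
# expects a module object exposing a ``cli`` attribute with ``add_commands``,
# ``commands`` and ``run_commands``. The _FakeCli/_FakeModule stand-ins below
# are hypothetical and exist only for this example.
class _FakeCli(object):
    def __init__(self):
        self.commands = []
    def add_commands(self, commands, output=None):
        self.commands.append(commands)
    def run_commands(self):
        return [['up']]  # pretend the device reported the interface as up
class _FakeModule(object):
    def __init__(self):
        self.cli = _FakeCli()
def _example_command_runner():
    runner = CommandRunner(_FakeModule())
    runner.add_command('show interface status')
    runner.add_conditional('result[0][0] eq up')
    runner.run()  # returns once every conditional has been satisfied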
| gpl-3.0 |
oscar810429/mysql-5.6_facebook | xtrabackup/test/python/testtools/content.py | 43 | 8747 | # Copyright (c) 2009-2011 testtools developers. See LICENSE for details.
"""Content - a MIME-like Content object."""
__all__ = [
'attach_file',
'Content',
'content_from_file',
'content_from_stream',
'text_content',
'TracebackContent',
]
import codecs
import os
from testtools import try_import
from testtools.compat import _b
from testtools.content_type import ContentType, UTF8_TEXT
from testtools.testresult import TestResult
functools = try_import('functools')
_join_b = _b("").join
DEFAULT_CHUNK_SIZE = 4096
def _iter_chunks(stream, chunk_size):
"""Read 'stream' in chunks of 'chunk_size'.
:param stream: A file-like object to read from.
:param chunk_size: The size of each read from 'stream'.
"""
chunk = stream.read(chunk_size)
while chunk:
yield chunk
chunk = stream.read(chunk_size)
class Content(object):
"""A MIME-like Content object.
Content objects can be serialised to bytes using the iter_bytes method.
If the Content-Type is recognised by other code, they are welcome to
look for richer contents than mere byte serialisation - for example in
memory object graphs etc. However, such code MUST be prepared to receive
a generic Content object that has been reconstructed from a byte stream.
:ivar content_type: The content type of this Content.
"""
def __init__(self, content_type, get_bytes):
"""Create a ContentType."""
if None in (content_type, get_bytes):
raise ValueError("None not permitted in %r, %r" % (
content_type, get_bytes))
self.content_type = content_type
self._get_bytes = get_bytes
def __eq__(self, other):
return (self.content_type == other.content_type and
_join_b(self.iter_bytes()) == _join_b(other.iter_bytes()))
def iter_bytes(self):
"""Iterate over bytestrings of the serialised content."""
return self._get_bytes()
def iter_text(self):
"""Iterate over the text of the serialised content.
This is only valid for text MIME types, and will use ISO-8859-1 if
no charset parameter is present in the MIME type. (This is somewhat
arbitrary, but consistent with RFC 2616 3.7.1).
:raises ValueError: If the content type is not text/\*.
"""
if self.content_type.type != "text":
raise ValueError("Not a text type %r" % self.content_type)
return self._iter_text()
def _iter_text(self):
"""Worker for iter_text - does the decoding."""
encoding = self.content_type.parameters.get('charset', 'ISO-8859-1')
try:
# 2.5+
decoder = codecs.getincrementaldecoder(encoding)()
for bytes in self.iter_bytes():
yield decoder.decode(bytes)
final = decoder.decode(_b(''), True)
if final:
yield final
except AttributeError:
# < 2.5
bytes = ''.join(self.iter_bytes())
yield bytes.decode(encoding)
def __repr__(self):
return "<Content type=%r, value=%r>" % (
self.content_type, _join_b(self.iter_bytes()))
class TracebackContent(Content):
"""Content object for tracebacks.
This adapts an exc_info tuple to the Content interface.
text/x-traceback;language=python is used for the mime type, in order to
provide room for other languages to format their tracebacks differently.
"""
def __init__(self, err, test):
"""Create a TracebackContent for err."""
if err is None:
raise ValueError("err may not be None")
content_type = ContentType('text', 'x-traceback',
{"language": "python", "charset": "utf8"})
self._result = TestResult()
value = self._result._exc_info_to_unicode(err, test)
super(TracebackContent, self).__init__(
content_type, lambda: [value.encode("utf8")])
def text_content(text):
"""Create a `Content` object from some text.
This is useful for adding details which are short strings.
"""
return Content(UTF8_TEXT, lambda: [text.encode('utf8')])
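# Illustrative usage sketch (not part of the original module): a Content built
# by text_content() serialises back to UTF-8 bytes and can also be read as text.
def _example_text_content():
    detail = text_content('traceback goes here')
    assert _join_b(detail.iter_bytes()) == _b('traceback goes here')
    assert ''.join(detail.iter_text()) == 'traceback goes here'
    return detail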
def maybe_wrap(wrapper, func):
"""Merge metadata for func into wrapper if functools is present."""
if functools is not None:
wrapper = functools.update_wrapper(wrapper, func)
return wrapper
def content_from_file(path, content_type=None, chunk_size=DEFAULT_CHUNK_SIZE,
buffer_now=False):
"""Create a `Content` object from a file on disk.
Note that unless 'buffer_now' is explicitly passed in as True, the file
will only be read from when ``iter_bytes`` is called.
:param path: The path to the file to be used as content.
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
Defaults to `DEFAULT_CHUNK_SIZE`.
:param buffer_now: If True, read the file from disk now and keep it in
memory. Otherwise, only read when the content is serialized.
"""
if content_type is None:
content_type = UTF8_TEXT
def reader():
# This should be try:finally:, but python2.4 makes that hard. When
# We drop older python support we can make this use a context manager
# for maximum simplicity.
stream = open(path, 'rb')
for chunk in _iter_chunks(stream, chunk_size):
yield chunk
stream.close()
return content_from_reader(reader, content_type, buffer_now)
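# Illustrative usage sketch (not part of the original module): with
# buffer_now=True the file is read immediately, so it may safely be removed
# before the detail is serialised. The path below is hypothetical.
def _example_content_from_file(path='/tmp/example.log'):
    return content_from_file(path, UTF8_TEXT, buffer_now=True)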
def content_from_stream(stream, content_type=None,
chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=False):
"""Create a `Content` object from a file-like stream.
Note that the stream will only be read from when ``iter_bytes`` is
called.
:param stream: A file-like object to read the content from. The stream
is not closed by this function or the content object it returns.
:param content_type: The type of content. If not specified, defaults
to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file.
Defaults to `DEFAULT_CHUNK_SIZE`.
:param buffer_now: If True, reads from the stream right now. Otherwise,
only reads when the content is serialized. Defaults to False.
"""
if content_type is None:
content_type = UTF8_TEXT
reader = lambda: _iter_chunks(stream, chunk_size)
return content_from_reader(reader, content_type, buffer_now)
def content_from_reader(reader, content_type, buffer_now):
"""Create a Content object that will obtain the content from reader.
:param reader: A callback to read the content. Should return an iterable of
bytestrings.
:param content_type: The content type to create.
:param buffer_now: If True the reader is evaluated immediately and
buffered.
"""
if content_type is None:
content_type = UTF8_TEXT
if buffer_now:
contents = list(reader())
reader = lambda: contents
return Content(content_type, reader)
def attach_file(detailed, path, name=None, content_type=None,
chunk_size=DEFAULT_CHUNK_SIZE, buffer_now=True):
"""Attach a file to this test as a detail.
This is a convenience method wrapping around `addDetail`.
Note that unless 'buffer_now' is left at its default of True, the file
*must* exist when the test result is called with the results of this
test, after the test has been torn down.
:param detailed: An object with details
:param path: The path to the file to attach.
:param name: The name to give to the detail for the attached file.
:param content_type: The content type of the file. If not provided,
defaults to UTF8-encoded text/plain.
:param chunk_size: The size of chunks to read from the file. Defaults
to something sensible.
:param buffer_now: If False the file content is read when the content
object is evaluated rather than when attach_file is called.
Note that this may be after any cleanups that obj_with_details has, so
if the file is a temporary file disabling buffer_now may cause the file
to be read after it is deleted. To handle those cases, using
attach_file as a cleanup is recommended because it guarantees a
sequence for when the attach_file call is made::
detailed.addCleanup(attach_file, 'foo.txt', detailed)
"""
if name is None:
name = os.path.basename(path)
content_object = content_from_file(
path, content_type, chunk_size, buffer_now)
detailed.addDetail(name, content_object)
| gpl-2.0 |
linmingren/ShadowsocksFork | shadowsocks/common.py | 945 | 8921 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import socket
import struct
import logging
def compat_ord(s):
if type(s) == int:
return s
return _ord(s)
def compat_chr(d):
if bytes == str:
return _chr(d)
return bytes([d])
_ord = ord
_chr = chr
ord = compat_ord
chr = compat_chr
def to_bytes(s):
if bytes != str:
if type(s) == str:
return s.encode('utf-8')
return s
def to_str(s):
if bytes != str:
if type(s) == bytes:
return s.decode('utf-8')
return s
def inet_ntop(family, ipstr):
if family == socket.AF_INET:
return to_bytes(socket.inet_ntoa(ipstr))
elif family == socket.AF_INET6:
import re
v6addr = ':'.join(('%02X%02X' % (ord(i), ord(j))).lstrip('0')
for i, j in zip(ipstr[::2], ipstr[1::2]))
v6addr = re.sub('::+', '::', v6addr, count=1)
return to_bytes(v6addr)
def inet_pton(family, addr):
addr = to_str(addr)
if family == socket.AF_INET:
return socket.inet_aton(addr)
elif family == socket.AF_INET6:
if '.' in addr: # a v4 addr
v4addr = addr[addr.rindex(':') + 1:]
v4addr = socket.inet_aton(v4addr)
v4addr = map(lambda x: ('%02X' % ord(x)), v4addr)
v4addr.insert(2, ':')
newaddr = addr[:addr.rindex(':') + 1] + ''.join(v4addr)
return inet_pton(family, newaddr)
dbyts = [0] * 8 # 8 groups
grps = addr.split(':')
for i, v in enumerate(grps):
if v:
dbyts[i] = int(v, 16)
else:
for j, w in enumerate(grps[::-1]):
if w:
dbyts[7 - j] = int(w, 16)
else:
break
break
return b''.join((chr(i // 256) + chr(i % 256)) for i in dbyts)
else:
raise RuntimeError("What family?")
def is_ip(address):
for family in (socket.AF_INET, socket.AF_INET6):
try:
if type(address) != str:
address = address.decode('utf8')
inet_pton(family, address)
return family
except (TypeError, ValueError, OSError, IOError):
pass
return False
def patch_socket():
if not hasattr(socket, 'inet_pton'):
socket.inet_pton = inet_pton
if not hasattr(socket, 'inet_ntop'):
socket.inet_ntop = inet_ntop
patch_socket()
ADDRTYPE_IPV4 = 1
ADDRTYPE_IPV6 = 4
ADDRTYPE_HOST = 3
def pack_addr(address):
address_str = to_str(address)
for family in (socket.AF_INET, socket.AF_INET6):
try:
r = socket.inet_pton(family, address_str)
if family == socket.AF_INET6:
return b'\x04' + r
else:
return b'\x01' + r
except (TypeError, ValueError, OSError, IOError):
pass
if len(address) > 255:
address = address[:255] # TODO
return b'\x03' + chr(len(address)) + address
def parse_header(data):
addrtype = ord(data[0])
dest_addr = None
dest_port = None
header_length = 0
if addrtype == ADDRTYPE_IPV4:
if len(data) >= 7:
dest_addr = socket.inet_ntoa(data[1:5])
dest_port = struct.unpack('>H', data[5:7])[0]
header_length = 7
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_HOST:
if len(data) > 2:
addrlen = ord(data[1])
if len(data) >= 2 + addrlen:
dest_addr = data[2:2 + addrlen]
dest_port = struct.unpack('>H', data[2 + addrlen:4 +
addrlen])[0]
header_length = 4 + addrlen
else:
logging.warn('header is too short')
else:
logging.warn('header is too short')
elif addrtype == ADDRTYPE_IPV6:
if len(data) >= 19:
dest_addr = socket.inet_ntop(socket.AF_INET6, data[1:17])
dest_port = struct.unpack('>H', data[17:19])[0]
header_length = 19
else:
logging.warn('header is too short')
else:
logging.warn('unsupported addrtype %d, maybe wrong password or '
'encryption method' % addrtype)
if dest_addr is None:
return None
return addrtype, to_bytes(dest_addr), dest_port, header_length
class IPNetwork(object):
ADDRLENGTH = {socket.AF_INET: 32, socket.AF_INET6: 128, False: 0}
def __init__(self, addrs):
self._network_list_v4 = []
self._network_list_v6 = []
if type(addrs) == str:
addrs = addrs.split(',')
list(map(self.add_network, addrs))
def add_network(self, addr):
if addr is "":
return
block = addr.split('/')
addr_family = is_ip(block[0])
addr_len = IPNetwork.ADDRLENGTH[addr_family]
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(block[0]))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, block[0]))
ip = (hi << 64) | lo
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if len(block) == 1:
prefix_size = 0
while (ip & 1) == 0 and ip is not 0:
ip >>= 1
prefix_size += 1
logging.warn("You did't specify CIDR routing prefix size for %s, "
"implicit treated as %s/%d" % (addr, addr, addr_len))
elif block[1].isdigit() and int(block[1]) <= addr_len:
prefix_size = addr_len - int(block[1])
ip >>= prefix_size
else:
raise Exception("Not a valid CIDR notation: %s" % addr)
if addr_family is socket.AF_INET:
self._network_list_v4.append((ip, prefix_size))
else:
self._network_list_v6.append((ip, prefix_size))
def __contains__(self, addr):
addr_family = is_ip(addr)
if addr_family is socket.AF_INET:
ip, = struct.unpack("!I", socket.inet_aton(addr))
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v4))
elif addr_family is socket.AF_INET6:
hi, lo = struct.unpack("!QQ", inet_pton(addr_family, addr))
ip = (hi << 64) | lo
return any(map(lambda n_ps: n_ps[0] == ip >> n_ps[1],
self._network_list_v6))
else:
return False
def test_inet_conv():
ipv4 = b'8.8.4.4'
b = inet_pton(socket.AF_INET, ipv4)
assert inet_ntop(socket.AF_INET, b) == ipv4
ipv6 = b'2404:6800:4005:805::1011'
b = inet_pton(socket.AF_INET6, ipv6)
assert inet_ntop(socket.AF_INET6, b) == ipv6
def test_parse_header():
assert parse_header(b'\x03\x0ewww.google.com\x00\x50') == \
(3, b'www.google.com', 80, 18)
assert parse_header(b'\x01\x08\x08\x08\x08\x00\x35') == \
(1, b'8.8.8.8', 53, 7)
assert parse_header((b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00'
b'\x00\x10\x11\x00\x50')) == \
(4, b'2404:6800:4005:805::1011', 80, 19)
def test_pack_header():
assert pack_addr(b'8.8.8.8') == b'\x01\x08\x08\x08\x08'
assert pack_addr(b'2404:6800:4005:805::1011') == \
b'\x04$\x04h\x00@\x05\x08\x05\x00\x00\x00\x00\x00\x00\x10\x11'
assert pack_addr(b'www.google.com') == b'\x03\x0ewww.google.com'
def test_ip_network():
ip_network = IPNetwork('127.0.0.0/24,::ff:1/112,::1,192.168.1.1,192.0.2.0')
assert '127.0.0.1' in ip_network
assert '127.0.1.1' not in ip_network
assert ':ff:ffff' in ip_network
assert '::ffff:1' not in ip_network
assert '::1' in ip_network
assert '::2' not in ip_network
assert '192.168.1.1' in ip_network
assert '192.168.1.2' not in ip_network
assert '192.0.2.1' in ip_network
assert '192.0.3.1' in ip_network # 192.0.2.0 is treated as 192.0.2.0/23
assert 'www.google.com' not in ip_network
if __name__ == '__main__':
test_inet_conv()
test_parse_header()
test_pack_header()
test_ip_network()
| apache-2.0 |
leekchan/django_test | django/contrib/formtools/tests/wizard/test_forms.py | 35 | 8944 | from __future__ import unicode_literals
from importlib import import_module
from django import forms, http
from django.conf import settings
from django.db import models
from django.test import TestCase
from django.template.response import TemplateResponse
from django.contrib.auth.models import User
from django.contrib.formtools.wizard.views import (WizardView,
SessionWizardView,
CookieWizardView)
class DummyRequest(http.HttpRequest):
def __init__(self, POST=None):
super(DummyRequest, self).__init__()
self.method = "POST" if POST else "GET"
if POST is not None:
self.POST.update(POST)
self.session = {}
self._dont_enforce_csrf_checks = True
def get_request(*args, **kwargs):
request = DummyRequest(*args, **kwargs)
engine = import_module(settings.SESSION_ENGINE)
request.session = engine.SessionStore(None)
return request
class Step1(forms.Form):
name = forms.CharField()
class Step2(forms.Form):
name = forms.CharField()
class Step3(forms.Form):
data = forms.CharField()
class CustomKwargsStep1(Step1):
def __init__(self, test=None, *args, **kwargs):
self.test = test
super(CustomKwargsStep1, self).__init__(*args, **kwargs)
class TestModel(models.Model):
name = models.CharField(max_length=100)
class Meta:
app_label = 'formtools'
class TestModelForm(forms.ModelForm):
class Meta:
model = TestModel
fields = '__all__'
TestModelFormSet = forms.models.modelformset_factory(TestModel, form=TestModelForm, extra=2,
fields='__all__')
class TestWizard(WizardView):
storage_name = 'django.contrib.formtools.wizard.storage.session.SessionStorage'
def dispatch(self, request, *args, **kwargs):
response = super(TestWizard, self).dispatch(request, *args, **kwargs)
return response, self
def get_form_kwargs(self, step, *args, **kwargs):
kwargs = super(TestWizard, self).get_form_kwargs(step, *args, **kwargs)
if step == 'kwargs_test':
kwargs['test'] = True
return kwargs
class TestWizardWithInitAttrs(TestWizard):
form_list = [Step1, Step2]
condition_dict = {'step2': True}
initial_dict = {'start': {'name': 'value1'}}
instance_dict = {'start': User()}
class FormTests(TestCase):
def test_form_init(self):
testform = TestWizard.get_initkwargs([Step1, Step2])
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})
testform = TestWizard.get_initkwargs([('start', Step1), ('step2', Step2)])
self.assertEqual(
testform['form_list'], {'start': Step1, 'step2': Step2})
testform = TestWizard.get_initkwargs([Step1, Step2, ('finish', Step3)])
self.assertEqual(
testform['form_list'], {'0': Step1, '1': Step2, 'finish': Step3})
testform = TestWizardWithInitAttrs.get_initkwargs()
self.assertEqual(testform['form_list'], {'0': Step1, '1': Step2})
def test_first_step(self):
request = get_request()
testform = TestWizard.as_view([Step1, Step2])
response, instance = testform(request)
self.assertEqual(instance.steps.current, '0')
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
def test_persistence(self):
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request = get_request({'test_wizard-current_step': 'start',
'name': 'data1'})
response, instance = testform(request)
self.assertEqual(instance.steps.current, 'start')
instance.storage.current_step = 'step2'
testform2 = TestWizard.as_view([('start', Step1), ('step2', Step2)])
request.POST = {'test_wizard-current_step': 'step2'}
response, instance = testform2(request)
self.assertEqual(instance.steps.current, 'step2')
def test_form_condition(self):
request = get_request()
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': True})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
testform = TestWizard.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)],
condition_dict={'step2': False})
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step3')
testform = TestWizardWithInitAttrs.as_view(
[('start', Step1), ('step2', Step2), ('step3', Step3)])
response, instance = testform(request)
self.assertEqual(instance.get_next_step(), 'step2')
def test_form_kwargs(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1),
('kwargs_test', CustomKwargsStep1)])
response, instance = testform(request)
self.assertEqual(instance.get_form_kwargs('start'), {})
self.assertEqual(instance.get_form_kwargs('kwargs_test'), {'test': True})
self.assertEqual(instance.get_form('kwargs_test').test, True)
def test_form_prefix(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.get_form_prefix(), 'start')
self.assertEqual(instance.get_form_prefix('another'), 'another')
def test_form_initial(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)],
initial_dict={'start': {'name': 'value1'}})
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
testform = TestWizardWithInitAttrs.as_view(
[('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(instance.get_form_initial('start'), {'name': 'value1'})
self.assertEqual(instance.get_form_initial('step2'), {})
def test_form_instance(self):
request = get_request()
the_instance = TestModel()
testform = TestWizard.as_view([('start', TestModelForm), ('step2', Step2)],
instance_dict={'start': the_instance})
response, instance = testform(request)
self.assertEqual(
instance.get_form_instance('start'),
the_instance)
self.assertEqual(
instance.get_form_instance('non_exist_instance'),
None)
testform = TestWizardWithInitAttrs.as_view(
[('start', TestModelForm), ('step2', Step2)])
response, instance = testform(request)
self.assertEqual(
instance.get_form_instance('start'),
TestWizardWithInitAttrs.instance_dict['start'])
def test_formset_instance(self):
request = get_request()
the_instance1, created = TestModel.objects.get_or_create(
name='test object 1')
the_instance2, created = TestModel.objects.get_or_create(
name='test object 2')
testform = TestWizard.as_view([('start', TestModelFormSet), ('step2', Step2)],
instance_dict={'start': TestModel.objects.filter(name='test object 1')})
response, instance = testform(request)
self.assertEqual(list(instance.get_form_instance('start')), [the_instance1])
self.assertEqual(instance.get_form_instance('non_exist_instance'), None)
self.assertEqual(instance.get_form().initial_form_count(), 1)
def test_done(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
self.assertRaises(NotImplementedError, instance.done, None)
def test_revalidation(self):
request = get_request()
testform = TestWizard.as_view([('start', Step1), ('step2', Step2)])
response, instance = testform(request)
instance.render_done(None)
self.assertEqual(instance.storage.current_step, 'start')
class SessionFormTests(TestCase):
def test_init(self):
request = get_request()
testform = SessionWizardView.as_view([('start', Step1)])
self.assertIsInstance(testform(request), TemplateResponse)
class CookieFormTests(TestCase):
def test_init(self):
request = get_request()
testform = CookieWizardView.as_view([('start', Step1)])
self.assertIsInstance(testform(request), TemplateResponse)
| bsd-3-clause |
GtTmy/pyeda | pyeda/parsing/test/test_pla.py | 5 | 1219 | """
Test PLA parsing functions
"""
from nose.tools import assert_raises
from pyeda.boolalg.espresso import FTYPE, DTYPE, RTYPE
from pyeda.parsing.pla import Error, parse
BASIC = """
# Filename: basic.pla
.i 4
.o 2
.ilb x y z
.ob f g
.p 3
.type fr
0001 00
0010 01
0100 10
1000 11
.e
"""
def test_errors():
# General syntax error
assert_raises(Error, parse, "foo\nbar\nfiz\nbuz\n")
# .i declared more than once
assert_raises(Error, parse, ".i 1\n.i 2\n")
# .o declared more than once
assert_raises(Error, parse, ".o 1\n.o 2\n")
# .ilb declared more than once
assert_raises(Error, parse, ".ilb a b\n.ilb c d\n")
# .ob declared more than once
assert_raises(Error, parse, ".ob a b\n.ob c d\n")
# .type declared more than once
assert_raises(Error, parse, ".type f\n.type r\n")
def test_basic():
d = parse(BASIC)
assert d == {
'ninputs': 4,
'noutputs': 2,
'input_labels': ['x', 'y', 'z'],
'output_labels': ['f', 'g'],
'intype': FTYPE | RTYPE,
'cover': {
((1, 1, 1, 2), (0, 0)),
((1, 1, 2, 1), (0, 1)),
((1, 2, 1, 1), (1, 0)),
((2, 1, 1, 1), (1, 1))
},
}
| bsd-2-clause |