repo_name | path | copies | size | content | license
---|---|---|---|---|---|
BaladiDogGames/baladidoggames.github.io | mingw/bin/lib/string.py | 124 | 20756 | """A collection of string operations (most are no longer used).
Warning: most of the code you see here isn't normally used nowadays.
Beginning with Python 1.6, many of these functions are implemented as
methods on the standard string object. They used to be implemented by
a built-in module called strop, but strop is now obsolete itself.
Public module variables:
whitespace -- a string containing all characters considered whitespace
lowercase -- a string containing all characters considered lowercase letters
uppercase -- a string containing all characters considered uppercase letters
letters -- a string containing all characters considered letters
digits -- a string containing all characters considered decimal digits
hexdigits -- a string containing all characters considered hexadecimal digits
octdigits -- a string containing all characters considered octal digits
punctuation -- a string containing all characters considered punctuation
printable -- a string containing all characters considered printable
"""
# Some strings for ctype-style character classification
whitespace = ' \t\n\r\v\f'
lowercase = 'abcdefghijklmnopqrstuvwxyz'
uppercase = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
letters = lowercase + uppercase
ascii_lowercase = lowercase
ascii_uppercase = uppercase
ascii_letters = ascii_lowercase + ascii_uppercase
digits = '0123456789'
hexdigits = digits + 'abcdef' + 'ABCDEF'
octdigits = '01234567'
punctuation = """!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~"""
printable = digits + letters + punctuation + whitespace
# Case conversion helpers
# Use str to convert Unicode literal in case of -U
l = map(chr, xrange(256))
_idmap = str('').join(l)
del l
# Functions which aren't available as string methods.
# Capitalize the words in a string, e.g. " aBc dEf " -> "Abc Def".
def capwords(s, sep=None):
"""capwords(s [,sep]) -> string
Split the argument into words using split, capitalize each
word using capitalize, and join the capitalized words using
join. If the optional second argument sep is absent or None,
runs of whitespace characters are replaced by a single space
and leading and trailing whitespace are removed, otherwise
sep is used to split and join the words.
"""
return (sep or ' ').join(x.capitalize() for x in s.split(sep))
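# Example (illustrative): with sep=None, runs of whitespace collapse to single
# spaces; with an explicit sep, empty fields are preserved.
#     >>> capwords('  aBc   dEf  ')
#     'Abc Def'
#     >>> capwords('x--y', '-')
#     'X--Y'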
# Construct a translation string
_idmapL = None
def maketrans(fromstr, tostr):
"""maketrans(frm, to) -> string
Return a translation table (a string of 256 bytes long)
suitable for use in string.translate. The strings frm and to
must be of the same length.
"""
if len(fromstr) != len(tostr):
raise ValueError, "maketrans arguments must have same length"
global _idmapL
if not _idmapL:
_idmapL = list(_idmap)
L = _idmapL[:]
fromstr = map(ord, fromstr)
for i in range(len(fromstr)):
L[fromstr[i]] = tostr[i]
return ''.join(L)
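# Example (illustrative): pair maketrans() with str.translate() to remap bytes.
#     >>> table = maketrans('abc', 'xyz')
#     >>> 'aabbcc'.translate(table)
#     'xxyyzz'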
####################################################################
import re as _re
class _multimap:
"""Helper class for combining multiple mappings.
Used by .{safe_,}substitute() to combine the mapping and keyword
arguments.
"""
def __init__(self, primary, secondary):
self._primary = primary
self._secondary = secondary
def __getitem__(self, key):
try:
return self._primary[key]
except KeyError:
return self._secondary[key]
class _TemplateMetaclass(type):
pattern = r"""
%(delim)s(?:
(?P<escaped>%(delim)s) | # Escape sequence of two delimiters
(?P<named>%(id)s) | # delimiter and a Python identifier
{(?P<braced>%(id)s)} | # delimiter and a braced identifier
(?P<invalid>) # Other ill-formed delimiter exprs
)
"""
def __init__(cls, name, bases, dct):
super(_TemplateMetaclass, cls).__init__(name, bases, dct)
if 'pattern' in dct:
pattern = cls.pattern
else:
pattern = _TemplateMetaclass.pattern % {
'delim' : _re.escape(cls.delimiter),
'id' : cls.idpattern,
}
cls.pattern = _re.compile(pattern, _re.IGNORECASE | _re.VERBOSE)
class Template:
"""A string class for supporting $-substitutions."""
__metaclass__ = _TemplateMetaclass
delimiter = '$'
idpattern = r'[_a-z][_a-z0-9]*'
def __init__(self, template):
self.template = template
# Search for $$, $identifier, ${identifier}, and any bare $'s
def _invalid(self, mo):
i = mo.start('invalid')
lines = self.template[:i].splitlines(True)
if not lines:
colno = 1
lineno = 1
else:
colno = i - len(''.join(lines[:-1]))
lineno = len(lines)
raise ValueError('Invalid placeholder in string: line %d, col %d' %
(lineno, colno))
def substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
# Check the most common path first.
named = mo.group('named') or mo.group('braced')
if named is not None:
val = mapping[named]
# We use this idiom instead of str() because the latter will
# fail if val is a Unicode containing non-ASCII characters.
return '%s' % (val,)
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
self._invalid(mo)
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
def safe_substitute(self, *args, **kws):
if len(args) > 1:
raise TypeError('Too many positional arguments')
if not args:
mapping = kws
elif kws:
mapping = _multimap(kws, args[0])
else:
mapping = args[0]
# Helper function for .sub()
def convert(mo):
named = mo.group('named')
if named is not None:
try:
# We use this idiom instead of str() because the latter
# will fail if val is a Unicode containing non-ASCII
return '%s' % (mapping[named],)
except KeyError:
return self.delimiter + named
braced = mo.group('braced')
if braced is not None:
try:
return '%s' % (mapping[braced],)
except KeyError:
return self.delimiter + '{' + braced + '}'
if mo.group('escaped') is not None:
return self.delimiter
if mo.group('invalid') is not None:
return self.delimiter
raise ValueError('Unrecognized named group in pattern',
self.pattern)
return self.pattern.sub(convert, self.template)
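# Example (illustrative): substitute() raises KeyError for missing names, while
# safe_substitute() leaves the unmatched placeholder in place.
#     >>> t = Template('$who likes ${what}')
#     >>> t.substitute(who='tim', what='kung pao')
#     'tim likes kung pao'
#     >>> t.safe_substitute(who='tim')
#     'tim likes ${what}'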
####################################################################
# NOTE: Everything below here is deprecated. Use string methods instead.
# This stuff will go away in Python 3.0.
# Backward compatible names for exceptions
index_error = ValueError
atoi_error = ValueError
atof_error = ValueError
atol_error = ValueError
# convert UPPER CASE letters to lower case
def lower(s):
"""lower(s) -> string
Return a copy of the string s converted to lowercase.
"""
return s.lower()
# Convert lower case letters to UPPER CASE
def upper(s):
"""upper(s) -> string
Return a copy of the string s converted to uppercase.
"""
return s.upper()
# Swap lower case letters and UPPER CASE
def swapcase(s):
"""swapcase(s) -> string
Return a copy of the string s with upper case characters
converted to lowercase and vice versa.
"""
return s.swapcase()
# Strip leading and trailing tabs and spaces
def strip(s, chars=None):
"""strip(s [,chars]) -> string
Return a copy of the string s with leading and trailing
whitespace removed.
If chars is given and not None, remove characters in chars instead.
If chars is unicode, S will be converted to unicode before stripping.
"""
return s.strip(chars)
# Strip leading tabs and spaces
def lstrip(s, chars=None):
"""lstrip(s [,chars]) -> string
Return a copy of the string s with leading whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return s.lstrip(chars)
# Strip trailing tabs and spaces
def rstrip(s, chars=None):
"""rstrip(s [,chars]) -> string
Return a copy of the string s with trailing whitespace removed.
If chars is given and not None, remove characters in chars instead.
"""
return s.rstrip(chars)
# Split a string into a list of space/tab-separated words
def split(s, sep=None, maxsplit=-1):
"""split(s [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string. If maxsplit is given, splits at no more than
maxsplit places (resulting in at most maxsplit+1 words). If sep
is not specified or is None, any whitespace string is a separator.
(split and splitfields are synonymous)
"""
return s.split(sep, maxsplit)
splitfields = split
# Split a string into a list of space/tab-separated words
def rsplit(s, sep=None, maxsplit=-1):
"""rsplit(s [,sep [,maxsplit]]) -> list of strings
Return a list of the words in the string s, using sep as the
delimiter string, starting at the end of the string and working
to the front. If maxsplit is given, at most maxsplit splits are
done. If sep is not specified or is None, any whitespace string
is a separator.
"""
return s.rsplit(sep, maxsplit)
# Join fields with optional separator
def join(words, sep = ' '):
"""join(list [,sep]) -> string
Return a string composed of the words in list, with
intervening occurrences of sep. The default separator is a
single space.
(joinfields and join are synonymous)
"""
return sep.join(words)
joinfields = join
# Find substring, raise exception if not found
def index(s, *args):
"""index(s, sub [,start [,end]]) -> int
Like find but raises ValueError when the substring is not found.
"""
return s.index(*args)
# Find last substring, raise exception if not found
def rindex(s, *args):
"""rindex(s, sub [,start [,end]]) -> int
Like rfind but raises ValueError when the substring is not found.
"""
return s.rindex(*args)
# Count non-overlapping occurrences of substring
def count(s, *args):
"""count(s, sub[, start[,end]]) -> int
Return the number of occurrences of substring sub in string
s[start:end]. Optional arguments start and end are
interpreted as in slice notation.
"""
return s.count(*args)
# Find substring, return -1 if not found
def find(s, *args):
"""find(s, sub [,start [,end]]) -> in
Return the lowest index in s where substring sub is found,
such that sub is contained within s[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.find(*args)
# Find last substring, return -1 if not found
def rfind(s, *args):
"""rfind(s, sub [,start [,end]]) -> int
Return the highest index in s where substring sub is found,
such that sub is contained within s[start:end]. Optional
arguments start and end are interpreted as in slice notation.
Return -1 on failure.
"""
return s.rfind(*args)
# for a bit of speed
_float = float
_int = int
_long = long
# Convert string to float
def atof(s):
"""atof(s) -> float
Return the floating point number represented by the string s.
"""
return _float(s)
# Convert string to integer
def atoi(s , base=10):
"""atoi(s [,base]) -> int
Return the integer represented by the string s in the given
base, which defaults to 10. The string s must consist of one
or more digits, possibly preceded by a sign. If base is 0, it
is chosen from the leading characters of s, 0 for octal, 0x or
0X for hexadecimal. If base is 16, a preceding 0x or 0X is
accepted.
"""
return _int(s, base)
# Convert string to long integer
def atol(s, base=10):
"""atol(s [,base]) -> long
Return the long integer represented by the string s in the
given base, which defaults to 10. The string s must consist
of one or more digits, possibly preceded by a sign. If base
is 0, it is chosen from the leading characters of s, 0 for
octal, 0x or 0X for hexadecimal. If base is 16, a preceding
0x or 0X is accepted. A trailing L or l is not accepted,
unless base is 0.
"""
return _long(s, base)
# Left-justify a string
def ljust(s, width, *args):
"""ljust(s, width[, fillchar]) -> string
Return a left-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated. If specified the fillchar is used instead of spaces.
"""
return s.ljust(width, *args)
# Right-justify a string
def rjust(s, width, *args):
"""rjust(s, width[, fillchar]) -> string
Return a right-justified version of s, in a field of the
specified width, padded with spaces as needed. The string is
never truncated. If specified the fillchar is used instead of spaces.
"""
return s.rjust(width, *args)
# Center a string
def center(s, width, *args):
"""center(s, width[, fillchar]) -> string
Return a centered version of s, in a field of the specified
width, padded with spaces as needed. The string is never
truncated. If specified the fillchar is used instead of spaces.
"""
return s.center(width, *args)
# Zero-fill a number, e.g., (12, 3) --> '012' and (-3, 3) --> '-03'
# Decadent feature: the argument may be a string or a number
# (Use of this is deprecated; it should be a string as with ljust c.s.)
def zfill(x, width):
"""zfill(x, width) -> string
Pad a numeric string x with zeros on the left, to fill a field
of the specified width. The string x is never truncated.
"""
if not isinstance(x, basestring):
x = repr(x)
return x.zfill(width)
# Expand tabs in a string.
# Doesn't take non-printing chars into account, but does understand \n.
def expandtabs(s, tabsize=8):
"""expandtabs(s [,tabsize]) -> string
Return a copy of the string s with all tab characters replaced
by the appropriate number of spaces, depending on the current
column, and the tabsize (default 8).
"""
return s.expandtabs(tabsize)
# Character translation through look-up table.
def translate(s, table, deletions=""):
"""translate(s,table [,deletions]) -> string
Return a copy of the string s, where all characters occurring
in the optional argument deletions are removed, and the
remaining characters have been mapped through the given
translation table, which must be a string of length 256. The
deletions argument is not allowed for Unicode strings.
"""
if deletions or table is None:
return s.translate(table, deletions)
else:
# Add s[:0] so that if s is Unicode and table is an 8-bit string,
# table is converted to Unicode. This means that table *cannot*
# be a dictionary -- for that feature, use u.translate() directly.
return s.translate(table + s[:0])
# Capitalize a string, e.g. "aBc dEf" -> "Abc def".
def capitalize(s):
"""capitalize(s) -> string
Return a copy of the string s with only its first character
capitalized.
"""
return s.capitalize()
# Substring replacement (global)
def replace(s, old, new, maxreplace=-1):
"""replace (str, old, new[, maxreplace]) -> string
Return a copy of string str with all occurrences of substring
old replaced by new. If the optional argument maxreplace is
given, only the first maxreplace occurrences are replaced.
"""
return s.replace(old, new, maxreplace)
# Try importing optional built-in module "strop" -- if it exists,
# it redefines some string operations that are 100-1000 times faster.
# It also defines values for whitespace, lowercase and uppercase
# that match <ctype.h>'s definitions.
try:
from strop import maketrans, lowercase, uppercase, whitespace
letters = lowercase + uppercase
except ImportError:
pass # Use the original versions
########################################################################
# the Formatter class
# see PEP 3101 for details and purpose of this class
# The hard parts are reused from the C implementation. They're exposed as "_"
# prefixed methods of str and unicode.
# The overall parser is implemented in str._formatter_parser.
# The field name parser is implemented in str._formatter_field_name_split
class Formatter(object):
def format(self, format_string, *args, **kwargs):
return self.vformat(format_string, args, kwargs)
def vformat(self, format_string, args, kwargs):
used_args = set()
result = self._vformat(format_string, args, kwargs, used_args, 2)
self.check_unused_args(used_args, args, kwargs)
return result
def _vformat(self, format_string, args, kwargs, used_args, recursion_depth):
if recursion_depth < 0:
raise ValueError('Max string recursion exceeded')
result = []
for literal_text, field_name, format_spec, conversion in \
self.parse(format_string):
# output the literal text
if literal_text:
result.append(literal_text)
# if there's a field, output it
if field_name is not None:
# this is some markup, find the object and do
# the formatting
# given the field_name, find the object it references
# and the argument it came from
obj, arg_used = self.get_field(field_name, args, kwargs)
used_args.add(arg_used)
# do any conversion on the resulting object
obj = self.convert_field(obj, conversion)
# expand the format spec, if needed
format_spec = self._vformat(format_spec, args, kwargs,
used_args, recursion_depth-1)
# format the object and append to the result
result.append(self.format_field(obj, format_spec))
return ''.join(result)
def get_value(self, key, args, kwargs):
if isinstance(key, (int, long)):
return args[key]
else:
return kwargs[key]
def check_unused_args(self, used_args, args, kwargs):
pass
def format_field(self, value, format_spec):
return format(value, format_spec)
def convert_field(self, value, conversion):
# do any conversion on the resulting object
if conversion == 'r':
return repr(value)
elif conversion == 's':
return str(value)
elif conversion is None:
return value
raise ValueError("Unknown conversion specifier {0!s}".format(conversion))
# returns an iterable that contains tuples of the form:
# (literal_text, field_name, format_spec, conversion)
# literal_text can be zero length
# field_name can be None, in which case there's no
# object to format and output
# if field_name is not None, it is looked up, formatted
# with format_spec and conversion and then used
def parse(self, format_string):
return format_string._formatter_parser()
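# Example (illustrative): parse() yields one tuple per literal/field chunk of the
# form (literal_text, field_name, format_spec, conversion).
#     >>> list(Formatter().parse('Hi {name!r:>10}!'))
#     [('Hi ', 'name', '>10', 'r'), ('!', None, None, None)]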
# given a field_name, find the object it references.
# field_name: the field being looked up, e.g. "0.name"
# or "lookup[3]"
# used_args: a set of which args have been used
# args, kwargs: as passed in to vformat
def get_field(self, field_name, args, kwargs):
first, rest = field_name._formatter_field_name_split()
obj = self.get_value(first, args, kwargs)
# loop through the rest of the field_name, doing
# getattr or getitem as needed
for is_attr, i in rest:
if is_attr:
obj = getattr(obj, i)
else:
obj = obj[i]
return obj, first
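# Example (illustrative): format() ties parse(), get_field() and format_field() together.
#     >>> Formatter().format('{0} + {0} = {result}', 2, result=4)
#     '2 + 2 = 4'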
| mit |
Integral-Technology-Solutions/ConfigNOW-4.3 | Lib/bdb.py | 4 | 18155 | """Debugger basics"""
import sys
import os
import types
__all__ = ["BdbQuit","Bdb","Breakpoint"]
BdbQuit = 'bdb.BdbQuit' # Exception to give up completely
class Bdb:
"""Generic Python debugger base class.
This class takes care of details of the trace facility;
a derived class should implement user interaction.
The standard debugger class (pdb.Pdb) is an example.
"""
def __init__(self):
self.breaks = {}
self.fncache = {}
def canonic(self, filename):
canonic = self.fncache.get(filename)
if not canonic:
canonic = os.path.abspath(filename)
self.fncache[filename] = canonic
return canonic
def reset(self):
import linecache
linecache.checkcache()
self.botframe = None
self.stopframe = None
self.returnframe = None
self.quitting = 0
def trace_dispatch(self, frame, event, arg):
if self.quitting:
return # None
if event == 'line':
return self.dispatch_line(frame)
if event == 'call':
return self.dispatch_call(frame, arg)
if event == 'return':
return self.dispatch_return(frame, arg)
if event == 'exception':
return self.dispatch_exception(frame, arg)
print 'bdb.Bdb.dispatch: unknown debugging event:', `event`
return self.trace_dispatch
def dispatch_line(self, frame):
if self.stop_here(frame) or self.break_here(frame):
self.user_line(frame)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_call(self, frame, arg):
# XXX 'arg' is no longer used
if self.botframe is None:
# First call of dispatch since reset()
self.botframe = frame
return self.trace_dispatch
if not (self.stop_here(frame) or self.break_anywhere(frame)):
# No need to trace this function
return # None
self.user_call(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
def dispatch_return(self, frame, arg):
if self.stop_here(frame) or frame == self.returnframe:
self.user_return(frame, arg)
if self.quitting: raise BdbQuit
def dispatch_exception(self, frame, arg):
if self.stop_here(frame):
self.user_exception(frame, arg)
if self.quitting: raise BdbQuit
return self.trace_dispatch
# Normally derived classes don't override the following
# methods, but they may if they want to redefine the
# definition of stopping and breakpoints.
def stop_here(self, frame):
if self.stopframe is None:
return 1
if frame is self.stopframe:
return 1
while frame is not None and frame is not self.stopframe:
if frame is self.botframe:
return 1
frame = frame.f_back
return 0
def break_here(self, frame):
filename = self.canonic(frame.f_code.co_filename)
if not self.breaks.has_key(filename):
return 0
lineno = frame.f_lineno
if not lineno in self.breaks[filename]:
return 0
# flag says ok to delete temp. bp
(bp, flag) = effective(filename, lineno, frame)
if bp:
self.currentbp = bp.number
if (flag and bp.temporary):
self.do_clear(str(bp.number))
return 1
else:
return 0
def do_clear(self, arg):
raise NotImplementedError, "subclass of bdb must implement do_clear()"
def break_anywhere(self, frame):
return self.breaks.has_key(
self.canonic(frame.f_code.co_filename))
# Derived classes should override the user_* methods
# to gain control.
def user_call(self, frame, argument_list):
"""This method is called when there is the remote possibility
that we ever need to stop in this function."""
pass
def user_line(self, frame):
"""This method is called when we stop or break at this line."""
pass
def user_return(self, frame, return_value):
"""This method is called when a return trap is set here."""
pass
def user_exception(self, frame, (exc_type, exc_value, exc_traceback)):
"""This method is called if an exception occurs,
but only if we are to stop at or just below this level."""
pass
# Derived classes and clients can call the following methods
# to affect the stepping state.
def set_step(self):
"""Stop after one line of code."""
self.stopframe = None
self.returnframe = None
self.quitting = 0
def set_next(self, frame):
"""Stop on the next line in or below the given frame."""
self.stopframe = frame
self.returnframe = None
self.quitting = 0
def set_return(self, frame):
"""Stop when returning from the given frame."""
self.stopframe = frame.f_back
self.returnframe = frame
self.quitting = 0
def set_trace(self):
"""Start debugging from here."""
try:
1 + ''
except:
frame = sys.exc_info()[2].tb_frame.f_back
self.reset()
while frame:
frame.f_trace = self.trace_dispatch
self.botframe = frame
frame = frame.f_back
self.set_step()
sys.settrace(self.trace_dispatch)
def set_continue(self):
# Don't stop except at breakpoints or when finished
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 0
if not self.breaks:
# no breakpoints; run without debugger overhead
sys.settrace(None)
try:
1 + '' # raise an exception
except:
frame = sys.exc_info()[2].tb_frame.f_back
while frame and frame is not self.botframe:
del frame.f_trace
frame = frame.f_back
def set_quit(self):
self.stopframe = self.botframe
self.returnframe = None
self.quitting = 1
sys.settrace(None)
# Derived classes and clients can call the following methods
# to manipulate breakpoints. These methods return an
# error message if something went wrong, None if all is well.
# Set_break prints out the breakpoint line and file:lineno.
# Call self.get_*break*() to see the breakpoints or better
# for bp in Breakpoint.bpbynumber: if bp: bp.bpprint().
def set_break(self, filename, lineno, temporary=0, cond = None):
filename = self.canonic(filename)
import linecache # Import as late as possible
line = linecache.getline(filename, lineno)
if not line:
return 'Line %s:%d does not exist' % (filename,
lineno)
if not self.breaks.has_key(filename):
self.breaks[filename] = []
list = self.breaks[filename]
if not lineno in list:
list.append(lineno)
bp = Breakpoint(filename, lineno, temporary, cond)
def clear_break(self, filename, lineno):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
if lineno not in self.breaks[filename]:
return 'There is no breakpoint at %s:%d' % (filename,
lineno)
# If there's only one bp in the list for that file,line
# pair, then remove the breaks entry
for bp in Breakpoint.bplist[filename, lineno][:]:
bp.deleteMe()
if not Breakpoint.bplist.has_key((filename, lineno)):
self.breaks[filename].remove(lineno)
if not self.breaks[filename]:
del self.breaks[filename]
def clear_bpbynumber(self, arg):
try:
number = int(arg)
except:
return 'Non-numeric breakpoint number (%s)' % arg
try:
bp = Breakpoint.bpbynumber[number]
except IndexError:
return 'Breakpoint number (%d) out of range' % number
if not bp:
return 'Breakpoint (%d) already deleted' % number
self.clear_break(bp.file, bp.line)
def clear_all_file_breaks(self, filename):
filename = self.canonic(filename)
if not self.breaks.has_key(filename):
return 'There are no breakpoints in %s' % filename
for line in self.breaks[filename]:
blist = Breakpoint.bplist[filename, line]
for bp in blist:
bp.deleteMe()
del self.breaks[filename]
def clear_all_breaks(self):
if not self.breaks:
return 'There are no breakpoints'
for bp in Breakpoint.bpbynumber:
if bp:
bp.deleteMe()
self.breaks = {}
def get_break(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename]
def get_breaks(self, filename, lineno):
filename = self.canonic(filename)
return self.breaks.has_key(filename) and \
lineno in self.breaks[filename] and \
Breakpoint.bplist[filename, lineno] or []
def get_file_breaks(self, filename):
filename = self.canonic(filename)
if self.breaks.has_key(filename):
return self.breaks[filename]
else:
return []
def get_all_breaks(self):
return self.breaks
# Derived classes and clients can call the following method
# to get a data structure representing a stack trace.
def get_stack(self, f, t):
stack = []
if t and t.tb_frame is f:
t = t.tb_next
while f is not None:
stack.append((f, f.f_lineno))
if f is self.botframe:
break
f = f.f_back
stack.reverse()
i = max(0, len(stack) - 1)
while t is not None:
stack.append((t.tb_frame, t.tb_lineno))
t = t.tb_next
return stack, i
#
def format_stack_entry(self, frame_lineno, lprefix=': '):
import linecache, repr
frame, lineno = frame_lineno
filename = self.canonic(frame.f_code.co_filename)
s = filename + '(' + `lineno` + ')'
if frame.f_code.co_name:
s = s + frame.f_code.co_name
else:
s = s + "<lambda>"
if frame.f_locals.has_key('__args__'):
args = frame.f_locals['__args__']
else:
args = None
if args:
s = s + repr.repr(args)
else:
s = s + '()'
if frame.f_locals.has_key('__return__'):
rv = frame.f_locals['__return__']
s = s + '->'
s = s + repr.repr(rv)
line = linecache.getline(filename, lineno)
if line: s = s + lprefix + line.strip()
return s
# The following two methods can be called by clients to use
# a debugger to debug a statement, given as a string.
def run(self, cmd, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(cmd, types.CodeType):
cmd = cmd+'\n'
try:
try:
exec cmd in globals, locals
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runeval(self, expr, globals=None, locals=None):
if globals is None:
import __main__
globals = __main__.__dict__
if locals is None:
locals = globals
self.reset()
sys.settrace(self.trace_dispatch)
if not isinstance(expr, types.CodeType):
expr = expr+'\n'
try:
try:
return eval(expr, globals, locals)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
def runctx(self, cmd, globals, locals):
# B/W compatibility
self.run(cmd, globals, locals)
# This method is more useful to debug a single function call.
def runcall(self, func, *args):
self.reset()
sys.settrace(self.trace_dispatch)
res = None
try:
try:
res = apply(func, args)
except BdbQuit:
pass
finally:
self.quitting = 1
sys.settrace(None)
return res
def set_trace():
Bdb().set_trace()
class Breakpoint:
"""Breakpoint class
Implements temporary breakpoints, ignore counts, disabling and
(re)-enabling, and conditionals.
Breakpoints are indexed by number through bpbynumber and by
the file,line tuple using bplist. The former points to a
single instance of class Breakpoint. The latter points to a
list of such instances since there may be more than one
breakpoint per line.
"""
# XXX Keeping state in the class is a mistake -- this means
# you cannot have more than one active Bdb instance.
next = 1 # Next bp to be assigned
bplist = {} # indexed by (file, lineno) tuple
bpbynumber = [None] # Each entry is None or an instance of Bpt
# index 0 is unused, except for marking an
# effective break .... see effective()
def __init__(self, file, line, temporary=0, cond = None):
self.file = file # This better be in canonical form!
self.line = line
self.temporary = temporary
self.cond = cond
self.enabled = 1
self.ignore = 0
self.hits = 0
self.number = Breakpoint.next
Breakpoint.next = Breakpoint.next + 1
# Build the two lists
self.bpbynumber.append(self)
if self.bplist.has_key((file, line)):
self.bplist[file, line].append(self)
else:
self.bplist[file, line] = [self]
def deleteMe(self):
index = (self.file, self.line)
self.bpbynumber[self.number] = None # No longer in list
self.bplist[index].remove(self)
if not self.bplist[index]:
# No more bp for this f:l combo
del self.bplist[index]
def enable(self):
self.enabled = 1
def disable(self):
self.enabled = 0
def bpprint(self):
if self.temporary:
disp = 'del '
else:
disp = 'keep '
if self.enabled:
disp = disp + 'yes'
else:
disp = disp + 'no '
print '%-4dbreakpoint %s at %s:%d' % (self.number, disp,
self.file, self.line)
if self.cond:
print '\tstop only if %s' % (self.cond,)
if self.ignore:
print '\tignore next %d hits' % (self.ignore)
if (self.hits):
if (self.hits > 1): ss = 's'
else: ss = ''
print ('\tbreakpoint already hit %d time%s' %
(self.hits, ss))
# -----------end of Breakpoint class----------
# Determines if there is an effective (active) breakpoint at this
# line of code. Returns a (breakpoint, delete-ok flag) pair, or (None, None) if none.
def effective(file, line, frame):
"""Determine which breakpoint for this file:line is to be acted upon.
Called only if we know there is a bpt at this
location. Returns breakpoint that was triggered and a flag
that indicates if it is ok to delete a temporary bp.
"""
possibles = Breakpoint.bplist[file,line]
for i in range(0, len(possibles)):
b = possibles[i]
if b.enabled == 0:
continue
# Count every hit when bp is enabled
b.hits = b.hits + 1
if not b.cond:
# If unconditional, and ignoring,
# go on to next, else break
if b.ignore > 0:
b.ignore = b.ignore -1
continue
else:
# breakpoint and marker that's ok
# to delete if temporary
return (b,1)
else:
# Conditional bp.
# Ignore count applies only to those bpt hits where the
# condition evaluates to true.
try:
val = eval(b.cond, frame.f_globals,
frame.f_locals)
if val:
if b.ignore > 0:
b.ignore = b.ignore -1
# continue
else:
return (b,1)
# else:
# continue
except:
# if eval fails, most conservative
# thing is to stop on breakpoint
# regardless of ignore count.
# Don't delete temporary,
# as another hint to user.
return (b,0)
return (None, None)
# -------------------- testing --------------------
class Tdb(Bdb):
def user_call(self, frame, args):
name = frame.f_code.co_name
if not name: name = '???'
print '+++ call', name, args
def user_line(self, frame):
import linecache
name = frame.f_code.co_name
if not name: name = '???'
fn = self.canonic(frame.f_code.co_filename)
line = linecache.getline(fn, frame.f_lineno)
print '+++', fn, frame.f_lineno, name, ':', line.strip()
def user_return(self, frame, retval):
print '+++ return', retval
def user_exception(self, frame, exc_stuff):
print '+++ exception', exc_stuff
self.set_continue()
def foo(n):
print 'foo(', n, ')'
x = bar(n*10)
print 'bar returned', x
def bar(a):
print 'bar(', a, ')'
return a/2
def test():
t = Tdb()
t.run('import bdb; bdb.foo(10)')
# end
| mit |
Pynitus-Universe/Pynitus-Backend | Pynitus/io/config.py | 2 | 1510 | """
Pynitus - A free and democratic music playlist
Copyright (C) 2017 Noah Hummel
This file is part of the Pynitus program, see <https://github.com/strangedev/Pynitus>.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from typing import Any
import yaml
from Pynitus.framework import memcache
def init_config():
"""
Should be called once on server startup.
Initializes the persistent cache and loads all config values from disk.
:return: None
"""
# TODO: use an absolute path for the config file in the bootstrap script
# TODO: log errors
with open("./pynitus.yaml") as f:
config = yaml.safe_load(f)
memcache.set("config", config)
def get(key: str) -> Any:
"""
Gets a value from the config by it's key.
:param key: The key of the value
:return: The value or None
"""
return memcache.get("config").get(key)
| agpl-3.0 |
axelkennedal/dissen | dissenEnv/lib/python3.5/site-packages/pip/_vendor/__init__.py | 128 | 4647 | """
pip._vendor is for vendoring dependencies of pip to prevent needing pip to
depend on something external.
Files inside of pip._vendor should be considered immutable and should only be
updated to versions from upstream.
"""
from __future__ import absolute_import
import glob
import os.path
import sys
# Downstream redistributors which have debundled our dependencies should also
# patch this value to be true. This will trigger the additional patching
# to cause things like "six" to be available as pip.
DEBUNDLED = False
# By default, look in this directory for a bunch of .whl files which we will
# add to the beginning of sys.path before attempting to import anything. This
# is done to support downstream re-distributors like Debian and Fedora who
# wish to create their own Wheels for our dependencies to aid in debundling.
WHEEL_DIR = os.path.abspath(os.path.dirname(__file__))
# Define a small helper function to alias our vendored modules to the real ones
# if the vendored ones do not exist. This idea of this was taken from
# https://github.com/kennethreitz/requests/pull/2567.
def vendored(modulename):
vendored_name = "{0}.{1}".format(__name__, modulename)
try:
__import__(vendored_name, globals(), locals(), level=0)
except ImportError:
try:
__import__(modulename, globals(), locals(), level=0)
except ImportError:
# We can just silently allow import failures to pass here. If we
# got to this point it means that ``import pip._vendor.whatever``
# failed and so did ``import whatever``. Since we're importing this
# upfront in an attempt to alias imports, not erroring here will
# just mean we get a regular import error whenever pip *actually*
# tries to import one of these modules to use it, which actually
# gives us a better error message than we would have otherwise
# gotten.
pass
else:
sys.modules[vendored_name] = sys.modules[modulename]
base, head = vendored_name.rsplit(".", 1)
setattr(sys.modules[base], head, sys.modules[modulename])
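# Example (illustrative): when the bundled copy is absent, vendored("six") aliases the
# system-wide module under the vendored name, so both import paths resolve:
#     vendored("six")
#     from pip._vendor import six   # the system "six" if no bundled copy exists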
# If we're operating in a debundled setup, then we want to go ahead and trigger
# the aliasing of our vendored libraries as well as looking for wheels to add
# to our sys.path. This will cause all of this code to be a no-op typically
# however downstream redistributors can enable it in a consistent way across
# all platforms.
if DEBUNDLED:
# Actually look inside of WHEEL_DIR to find .whl files and add them to the
# front of our sys.path.
sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
# Actually alias all of our vendored dependencies.
vendored("cachecontrol")
vendored("colorama")
vendored("distlib")
vendored("html5lib")
vendored("lockfile")
vendored("six")
vendored("six.moves")
vendored("six.moves.urllib")
vendored("packaging")
vendored("packaging.version")
vendored("packaging.specifiers")
vendored("pkg_resources")
vendored("progress")
vendored("retrying")
vendored("requests")
vendored("requests.packages")
vendored("requests.packages.urllib3")
vendored("requests.packages.urllib3._collections")
vendored("requests.packages.urllib3.connection")
vendored("requests.packages.urllib3.connectionpool")
vendored("requests.packages.urllib3.contrib")
vendored("requests.packages.urllib3.contrib.ntlmpool")
vendored("requests.packages.urllib3.contrib.pyopenssl")
vendored("requests.packages.urllib3.exceptions")
vendored("requests.packages.urllib3.fields")
vendored("requests.packages.urllib3.filepost")
vendored("requests.packages.urllib3.packages")
vendored("requests.packages.urllib3.packages.ordered_dict")
vendored("requests.packages.urllib3.packages.six")
vendored("requests.packages.urllib3.packages.ssl_match_hostname")
vendored("requests.packages.urllib3.packages.ssl_match_hostname."
"_implementation")
vendored("requests.packages.urllib3.poolmanager")
vendored("requests.packages.urllib3.request")
vendored("requests.packages.urllib3.response")
vendored("requests.packages.urllib3.util")
vendored("requests.packages.urllib3.util.connection")
vendored("requests.packages.urllib3.util.request")
vendored("requests.packages.urllib3.util.response")
vendored("requests.packages.urllib3.util.retry")
vendored("requests.packages.urllib3.util.ssl_")
vendored("requests.packages.urllib3.util.timeout")
vendored("requests.packages.urllib3.util.url")
| mit |
mindriot101/bokeh | bokeh/sampledata/tests/test_us_holidays.py | 3 | 1990 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.us_holidays as bsu
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
ALL = (
'us_holidays',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.us_holidays", ALL))
@pytest.mark.sampledata
def test_us_holidays():
import bokeh.sampledata.us_holidays as bsu
assert isinstance(bsu.us_holidays, list)
# check detail for package data
assert len(bsu.us_holidays) == 305
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| bsd-3-clause |
edubrunaldi/kivy | examples/canvas/rounded_rectangle.py | 31 | 4596 | # -*- coding: utf-8 -*-
from kivy.app import App
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse, Rectangle, RoundedRectangle
from kivy.uix.label import Label
from kivy.lang import Builder
from kivy.factory import Factory
TEXTURE = 'kiwi.jpg'
YELLOW = (1, .7, 0)
ORANGE = (1, .45, 0)
RED = (1, 0, 0)
WHITE = (1, 1, 1)
class RoundedRectangleWidget(Widget):
def prepare(self):
with self.canvas:
Color(*WHITE)
# Rectangle of default size 100x100
Rectangle(pos=(50, 400))
# RoundedRectangles of default size 100x100:
# Textured:
RoundedRectangle(
pos=(175, 400), radius=[0, 50, 0, 50], source=TEXTURE)
# Colored:
Color(*YELLOW)
RoundedRectangle(pos=(300, 400), radius=[0, 50, 0, 50])
# Textured + Colored
# Color(.3,.3,.3, 1)
RoundedRectangle(
pos=(425, 400), radius=[0, 50, 0, 50], source=TEXTURE)
# Possible radius arguments:
# 1) Same value for each corner
Color(*ORANGE)
# With same radius 20x20
RoundedRectangle(pos=(50, 275), radius=[20])
# With same radius dimensions 20x40
RoundedRectangle(pos=(175, 275), radius=[(20, 40)])
# 2) Different values for each corner
Color(*RED)
# With different radiuses NxN:
RoundedRectangle(pos=(300, 275), radius=[10, 20, 30, 40])
# With different radiuses:
RoundedRectangle(
pos=(425, 275), radius=[(10, 20), (20, 30), (30, 40), (40, 50)])
# Default ellipses
Color(*WHITE)
Ellipse(pos=(50, 150))
Ellipse(pos=(175, 150))
Ellipse(pos=(300, 150))
Ellipse(pos=(425, 150))
# Radius dimensions can't be bigger than half of the figure side
RoundedRectangle(pos=(175, 150), radius=[9000], source=TEXTURE)
# Segments parameter defines how many segments each corner has.
# More segments - more roundness
Color(*RED)
RoundedRectangle(pos=(300, 150), radius=[9000])
RoundedRectangle(pos=(425, 150), radius=[9000], segments=15)
Color(*ORANGE)
RoundedRectangle(pos=(425, 150), radius=[9000], segments=2)
Color(*YELLOW)
RoundedRectangle(pos=(425, 150), radius=[9000], segments=1)
# Various sizes
# You can cut corners by setting segments to 1.
# You can set different segment count to corners,
# by using a list useful for lowering vertex count
# by using small amount on small corners, while using
# bigger amount on bigger corners.
RoundedRectangle(
pos=(50, 25),
radius=[40],
segments=[1, 1, 10, 10],
size=(125, 100))
# If radius dimension is 0, then the corner will be sharp
# (90 degrees). It is also possible to mix tuple values
# with numeric
Color(*ORANGE)
RoundedRectangle(
pos=(200, 25),
radius=[(40, 20),
45.5, 45.5, 0],
segments=[2, 3, 3, 1], size=(125, 100))
Color(*RED)
RoundedRectangle(
pos=(350, 25),
radius=[(40, 40), (40, 40), (20, 20), (20, 20)],
segments=[2, 3, 3, 2],
size=(150, 100))
class DrawRoundedRectanglesApp(App):
def build(self):
kv = '''
Widget:
canvas:
Color:
rgba: 1, 1,1, 1
RoundedRectangle:
pos: 575, 400
size: 100, 100
radius: [0, 50, 0, 50]
source: 'kiwi.jpg'
Color:
rgba: 0, 0.8, 0.8, 1
RoundedRectangle:
pos: 575, 275
size: 100, 100
radius: [(10, 20), (20, 30), (30, 40), (40, 50)]
RoundedRectangle:
pos: 575, 150
size: 100, 100
radius: [9000]
segments: 15
RoundedRectangle:
pos: 550, 25
size: 150, 100
segments: [1, 2, 1, 3]
radius: [30, 40, 30, 40]
'''
widget = RoundedRectangleWidget()
widget.prepare()
kvrect = Builder.load_string(kv)
widget.add_widget(kvrect)
return widget
if __name__ == '__main__':
DrawRoundedRectanglesApp().run()
| mit |
mzizzi/ansible | lib/ansible/modules/network/vyos/vyos_system.py | 40 | 6296 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: "vyos_system"
version_added: "2.3"
author: "Nathaniel Case (@qalthos)"
short_description: Run `set system` commands on VyOS devices
description:
- Runs one or more commands on remote devices running VyOS.
This module can also be introspected to validate key parameters before
returning successfully.
extends_documentation_fragment: vyos
options:
host_name:
description:
- Configure the device hostname parameter. This option takes an ASCII string value.
domain_name:
description:
- The new domain name to apply to the device.
name_server:
description:
- A list of name servers to use with the device. Mutually exclusive with
I(domain_search)
required: false
default: null
domain_search:
description:
- A list of domain names to search. Mutually exclusive with
I(name_server)
state:
description:
- Whether to apply (C(present)) or remove (C(absent)) the settings.
default: present
choices: ['present', 'absent']
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- set system hostname vyos01
- set system domain-name foo.example.com
"""
EXAMPLES = """
- name: configure hostname and domain-name
vyos_system:
hostname: vyos01
domain_name: test.example.com
- name: remove all configuration
vyos_system:
state: absent
- name: configure name servers
vyos_system:
name_server:
- 8.8.8.8
- 8.8.4.4
- name: configure domain search suffixes
vyos_system:
domain_search:
- sub1.example.com
- sub2.example.com
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.vyos import get_config, load_config
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def spec_key_to_device_key(key):
device_key = key.replace('_', '-')
# domain-search is longer than just its key
if device_key == 'domain-search':
device_key += ' domain'
return device_key
def config_to_dict(module):
data = get_config(module)
config = {'domain_search': [], 'name_server': []}
for line in data.split('\n'):
if line.startswith('set system host-name'):
config['host_name'] = line[22:-1]
elif line.startswith('set system domain-name'):
config['domain_name'] = line[24:-1]
elif line.startswith('set system domain-search domain'):
config['domain_search'].append(line[33:-1])
elif line.startswith('set system name-server'):
config['name_server'].append(line[24:-1])
return config
def spec_to_commands(want, have):
commands = []
state = want.pop('state')
# state='absent' by itself has special meaning
if state == 'absent' and all(v is None for v in want.values()):
# Clear everything
for key in have:
commands.append('delete system %s' % spec_key_to_device_key(key))
for key in want:
if want[key] is None:
continue
current = have.get(key)
proposed = want[key]
device_key = spec_key_to_device_key(key)
# These keys are lists which may need to be reconciled with the device
if key in ['domain_search', 'name_server']:
if not proposed:
# Empty list was passed, delete all values
commands.append("delete system %s" % device_key)
for config in proposed:
if state == 'absent' and config in current:
commands.append("delete system %s '%s'" % (device_key, config))
elif state == 'present' and config not in current:
commands.append("set system %s '%s'" % (device_key, config))
else:
if state == 'absent' and current and proposed:
commands.append('delete system %s' % device_key)
elif state == 'present' and proposed and proposed != current:
commands.append("set system %s '%s'" % (device_key, proposed))
return commands
def map_param_to_obj(module):
return {
'host_name': module.params['host_name'],
'domain_name': module.params['domain_name'],
'domain_search': module.params['domain_search'],
'name_server': module.params['name_server'],
'state': module.params['state']
}
def main():
argument_spec = dict(
host_name=dict(type='str'),
domain_name=dict(type='str'),
domain_search=dict(type='list'),
name_server=dict(type='list', aliases=['name_servers']),
state=dict(type='str', default='present', choices=['present', 'absent']),
)
argument_spec.update(vyos_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
mutually_exclusive=[('domain_name', 'domain_search')],
)
warnings = list()
check_args(module, warnings)
result = {'changed': False, 'warnings': warnings}
want = map_param_to_obj(module)
have = config_to_dict(module)
commands = spec_to_commands(want, have)
result['commands'] = commands
if commands:
commit = not module.check_mode
response = load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
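# Example (illustrative; values are arbitrary): spec_to_commands() diffs desired vs.
# current state into VyOS configuration commands.
#     want = {'host_name': 'vyos01', 'domain_name': None, 'domain_search': None,
#             'name_server': ['8.8.8.8'], 'state': 'present'}
#     have = {'domain_search': [], 'name_server': []}
#     spec_to_commands(want, have)
#     # -> ["set system host-name 'vyos01'", "set system name-server '8.8.8.8'"] (order may vary)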
| gpl-3.0 |
lordB8r/polls | ENV/lib/python2.7/site-packages/django/db/backends/postgresql_psycopg2/version.py | 331 | 1502 | """
Extracts the version of the PostgreSQL server.
"""
import re
# This reg-exp is intentionally fairly flexible here.
# Needs to be able to handle stuff like:
# PostgreSQL 8.3.6
# EnterpriseDB 8.3
# PostgreSQL 8.3 beta4
# PostgreSQL 8.4beta1
VERSION_RE = re.compile(r'\S+ (\d+)\.(\d+)\.?(\d+)?')
def _parse_version(text):
"Internal parsing method. Factored out for testing purposes."
major, major2, minor = VERSION_RE.search(text).groups()
try:
return int(major) * 10000 + int(major2) * 100 + int(minor)
except (ValueError, TypeError):
return int(major) * 10000 + int(major2) * 100
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
For example, 80304 for 8.3.4. The last two digits will be 00 in the case of
releases (e.g., 80400 for 'PostgreSQL 8.4') or in the case of beta and
prereleases (e.g. 90100 for 'PostgreSQL 9.1beta2').
PQServerVersion()/``server_version`` doesn't execute a query so try that
first, then fallback to a ``SELECT version()`` query.
"""
if hasattr(connection, 'server_version'):
return connection.server_version
else:
cursor = connection.cursor()
cursor.execute("SELECT version()")
return _parse_version(cursor.fetchone()[0])
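# Example (illustrative): _parse_version() maps a version banner to one integer.
#     _parse_version('PostgreSQL 8.3.6')     # -> 80306
#     _parse_version('PostgreSQL 9.1beta2')  # -> 90100 (missing revision -> trailing 00)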
| mit |
barseghyanartur/django-debug-toolbar | docs/conf.py | 1 | 8991 | # -*- coding: utf-8 -*-
#
# Django Debug Toolbar documentation build configuration file, created by
# sphinx-quickstart on Sun Oct 27 13:18:25 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import datetime
import sys
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'example.settings'
sys.path.append(os.path.dirname(os.path.dirname(__file__)))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Django Debug Toolbar'
copyright = u'{}, Django Debug Toolbar developers and contributors'
copyright = copyright.format(datetime.date.today().year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.7'
# The full version, including alpha/beta/rc tags.
release = '1.7'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoDebugToolbardoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'DjangoDebugToolbar.tex', u'Django Debug Toolbar Documentation',
u'Django Debug Toolbar developers and contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'djangodebugtoolbar', u'Django Debug Toolbar Documentation',
[u'Django Debug Toolbar developers and contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'DjangoDebugToolbar', u'Django Debug Toolbar Documentation',
u'Django Debug Toolbar developers and contributors', 'DjangoDebugToolbar', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'https://docs.python.org/': None,
'https://docs.djangoproject.com/en/dev/': 'https://docs.djangoproject.com/en/dev/_objects/',
}
# -- Options for Read the Docs --------------------------------------------
RTD_NEW_THEME = True
| bsd-3-clause |
fourks/moveonpc | contrib/create_move.py | 7 | 2645 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# PS Move API - An interface for the PS Move Motion Controller
# Copyright (c) 2012 Thomas Perl <[email protected]>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#
# create_move.py - Create device entry for a Move Motion Controller
# 2012-06-02 Thomas Perl <thp.io/about>
#
import dbus
import sys
if len(sys.argv) != 2:
print >>sys.stderr, """
Usage: %s <btaddr>
Tries to add the controller with address <btaddr> to the
bluetoothd list of known devices via D-Bus.
""" % sys.argv[0]
sys.exit(1)
address = sys.argv[1]
system_bus = dbus.SystemBus()
manager_object = system_bus.get_object('org.bluez', '/')
manager = dbus.Interface(manager_object, 'org.bluez.Manager')
adapter_path = manager.DefaultAdapter()
adapter_object = system_bus.get_object('org.bluez', adapter_path)
adapter = dbus.Interface(adapter_object, 'org.bluez.Adapter')
try:
adapter.CreateDevice(address)
except Exception, e:
print 'CreateDevice exception:', e
for device in adapter.ListDevices():
device_object = system_bus.get_object('org.bluez', device)
device = dbus.Interface(device_object, 'org.bluez.Device')
properties = device.GetProperties()
if properties['Address'].lower() == address.lower():
print 'Setting device as trusted...'
device.SetProperty('Trusted', True)
break
| bsd-2-clause |
andreaso/ansible | lib/ansible/plugins/terminal/sros.py | 43 | 1387 | #
# (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible.plugins.terminal import TerminalBase
from ansible.errors import AnsibleConnectionFailure
class TerminalModule(TerminalBase):
terminal_stdout_re = [
re.compile(br"[\r\n]?[\w+\-\.:\/\[\]]+(?:\([^\)]+\)){,3}(?:>|#) ?$"),
re.compile(br"\[\w+\@[\w\-\.]+(?: [^\]])\] ?[>#\$] ?$")
]
terminal_stderr_re = [
re.compile(br"Error:"),
]
def on_open_shell(self):
try:
self._exec_cli_command(b'environment no more')
except AnsibleConnectionFailure:
raise AnsibleConnectionFailure('unable to set terminal parameters')
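# Illustrative usage sketch (added for clarity; not part of the Ansible plugin
# itself).  The prompt below is a made-up example of an SR OS style prompt that
# the terminal_stdout_re patterns above are intended to match.
def _example_prompt_detection():
    prompt = b"A:sros-router# "
    return any(regex.search(prompt) for regex in TerminalModule.terminal_stdout_re)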
| gpl-3.0 |
hayderimran7/zulip | zilencer/management/commands/profile_request.py | 117 | 1632 | from __future__ import absolute_import
from optparse import make_option
from django.core.management.base import BaseCommand
from zerver.models import get_user_profile_by_email, UserMessage
from zerver.views.old_messages import get_old_messages_backend
import cProfile
import logging
from zerver.middleware import LogRequests
request_logger = LogRequests()
class MockSession(object):
def __init__(self):
self.modified = False
class MockRequest(object):
def __init__(self, email):
self.user = get_user_profile_by_email(email)
self.path = '/'
self.method = "POST"
self.META = {"REMOTE_ADDR": "127.0.0.1"}
self.REQUEST = {"anchor": UserMessage.objects.filter(user_profile=self.user).order_by("-message")[200].message_id,
"num_before": 1200,
"num_after": 200}
self.GET = {}
self.session = MockSession()
def get_full_path(self):
return self.path
def profile_request(request):
request_logger.process_request(request)
prof = cProfile.Profile()
prof.enable()
ret = get_old_messages_backend(request, request.user,
apply_markdown=True)
prof.disable()
prof.dump_stats("/tmp/profile.data")
request_logger.process_response(request, ret)
logging.info("Profiling data written to /tmp/profile.data")
return ret
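# Illustrative sketch (not part of the original command): the dump written to
# /tmp/profile.data above can be inspected afterwards with the stdlib pstats
# module; the path and the top-20 cutoff are example values.
def example_print_profile(path="/tmp/profile.data"):
    import pstats
    stats = pstats.Stats(path)
    stats.sort_stats("cumulative").print_stats(20)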
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--email', action='store'),
)
def handle(self, *args, **options):
profile_request(MockRequest(options["email"]))
| apache-2.0 |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/Mako-0.1.8-py2.5.egg/mako/filters.py | 5 | 5324 | # filters.py
# Copyright (C) 2006, 2007 Geoffrey T. Dairiki <[email protected]> and Michael Bayer <[email protected]>
#
# This module is part of Mako and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re, cgi, urllib, htmlentitydefs, codecs
from StringIO import StringIO
xml_escapes = {
    '&' : '&amp;',
    '>' : '&gt;',
    '<' : '&lt;',
    '"' : '&#34;',   # also &quot; in html-only
    "'" : '&#39;'    # also &apos; in html-only
}
# XXX: &quot; is valid in HTML and XML
#      &apos; is not valid HTML, but is valid XML
def html_escape(string):
return cgi.escape(string, True)
def xml_escape(string):
return re.sub(r'([&<"\'>])', lambda m: xml_escapes[m.group()], string)
def url_escape(string):
# convert into a list of octets
string = string.encode("utf8")
return urllib.quote_plus(string)
def url_unescape(string):
text = urllib.unquote_plus(string)
if not is_ascii_str(text):
text = text.decode("utf8")
return text
def trim(string):
return string.strip()
class Decode(object):
def __getattr__(self, key):
def decode(x):
if isinstance(x, unicode):
return x
if not isinstance(x, str):
return str(x)
return unicode(x, encoding=key)
return decode
decode = Decode()
_ASCII_re = re.compile(r'\A[\x00-\x7f]*\Z')
def is_ascii_str(text):
return isinstance(text, str) and _ASCII_re.match(text)
################################################################
class XMLEntityEscaper(object):
def __init__(self, codepoint2name, name2codepoint):
self.codepoint2entity = dict([(c, u'&%s;' % n)
for c,n in codepoint2name.iteritems()])
self.name2codepoint = name2codepoint
def escape_entities(self, text):
"""Replace characters with their character entity references.
Only characters corresponding to a named entity are replaced.
"""
return unicode(text).translate(self.codepoint2entity)
def __escape(self, m):
codepoint = ord(m.group())
try:
return self.codepoint2entity[codepoint]
except (KeyError, IndexError):
return '&#x%X;' % codepoint
__escapable = re.compile(r'["&<>]|[^\x00-\x7f]')
def escape(self, text):
"""Replace characters with their character references.
Replace characters by their named entity references.
Non-ASCII characters, if they do not have a named entity reference,
are replaced by numerical character references.
The return value is guaranteed to be ASCII.
"""
return self.__escapable.sub(self.__escape, unicode(text)
).encode('ascii')
# XXX: This regexp will not match all valid XML entity names__.
    # (It punts on details involving CombiningChars and Extenders.)
#
# .. __: http://www.w3.org/TR/2000/REC-xml-20001006#NT-EntityRef
__characterrefs = re.compile(r'''& (?:
\#(\d+)
| \#x([\da-f]+)
| ( (?!\d) [:\w] [-.:\w]+ )
) ;''',
re.X | re.UNICODE)
def __unescape(self, m):
dval, hval, name = m.groups()
if dval:
codepoint = int(dval)
elif hval:
codepoint = int(hval, 16)
else:
codepoint = self.name2codepoint.get(name, 0xfffd)
# U+FFFD = "REPLACEMENT CHARACTER"
if codepoint < 128:
return chr(codepoint)
return unichr(codepoint)
def unescape(self, text):
"""Unescape character references.
All character references (both entity references and numerical
character references) are unescaped.
"""
return self.__characterrefs.sub(self.__unescape, text)
_html_entities_escaper = XMLEntityEscaper(htmlentitydefs.codepoint2name,
htmlentitydefs.name2codepoint)
html_entities_escape = _html_entities_escaper.escape_entities
html_entities_unescape = _html_entities_escaper.unescape
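# Illustrative sketch (added for clarity; not part of the original Mako
# module): round-tripping named entities with the escaper built above.
def _example_entity_roundtrip():
    escaped = html_entities_escape(u'M\xfcller & Co.')  # u'M&uuml;ller &amp; Co.'
    return html_entities_unescape(escaped)              # back to u'M\xfcller & Co.'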
def htmlentityreplace_errors(ex):
"""An encoding error handler.
This python `codecs`_ error handler replaces unencodable
characters with HTML entities, or, if no HTML entity exists for
the character, XML character references.
>>> u'The cost was \u20ac12.'.encode('latin1', 'htmlentityreplace')
    'The cost was &euro;12.'
"""
if isinstance(ex, UnicodeEncodeError):
# Handle encoding errors
bad_text = ex.object[ex.start:ex.end]
text = _html_entities_escaper.escape(bad_text)
return (unicode(text), ex.end)
raise ex
codecs.register_error('htmlentityreplace', htmlentityreplace_errors)
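# Illustrative sketch (added for clarity; not in the original module): once the
# handler is registered above, any codec can fall back to entity references for
# characters it cannot encode.
def _example_htmlentityreplace():
    # U+2603 (snowman) has no named entity, so it becomes a numeric reference.
    return u'\u2603 snowman'.encode('ascii', 'htmlentityreplace')  # '&#x2603; snowman'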
# TODO: options to make this dynamic per-compilation will be added in a later release
DEFAULT_ESCAPES = {
'x':'filters.xml_escape',
'h':'filters.html_escape',
'u':'filters.url_escape',
'trim':'filters.trim',
'entity':'filters.html_entities_escape',
'unicode':'unicode',
'decode':'decode',
'n':'n'
}
| bsd-3-clause |
opoplawski/ansible | lib/ansible/compat/tests/mock.py | 258 | 1241 | # (c) 2014, Toshio Kuratomi <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
'''
Compat module for Python3.x's unittest.mock module
'''
# Python 2.7
# Note: Could use the pypi mock library on python3.x as well as python2.x. It
# is the same as the python3 stdlib mock library
try:
from unittest.mock import *
except ImportError:
# Python 2
try:
from mock import *
except ImportError:
print('You need the mock library installed on python2.x to run tests')
| gpl-3.0 |
anushreejangid/condoor | tests/system/test_connection_nx9k.py | 1 | 2862 | from tests.system.common import CondoorTestCase, StopTelnetSrv, StartTelnetSrv
from tests.dmock.dmock import NX9KHandler
from tests.utils import remove_cache_file
import condoor
class TestNX9KConnection(CondoorTestCase):
@StartTelnetSrv(NX9KHandler, 10024)
def setUp(self):
CondoorTestCase.setUp(self)
@StopTelnetSrv()
def tearDown(self):
pass
def test_NX9K_1_discovery(self):
remove_cache_file()
urls = ["telnet://admin:[email protected]:10024"]
conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
self.conn = conn
conn.connect(self.logfile_condoor)
self.assertEqual(conn.is_discovered, True, "Not discovered properly")
self.assertEqual(conn.hostname, "switch", "Wrong Hostname: {}".format(conn.hostname))
self.assertEqual(conn.family, "N9K", "Wrong Family: {}".format(conn.family))
self.assertEqual(conn.platform, "N9K-C9508", "Wrong Platform: {}".format(conn.platform))
self.assertEqual(conn.os_type, "NX-OS", "Wrong OS Type: {}".format(conn.os_type))
self.assertEqual(conn.os_version, "7.0(3)IED5(1)", "Wrong Version: {}".format(conn.os_version))
self.assertEqual(conn.udi['name'], "Chassis", "Wrong Name: {}".format(conn.udi['name']))
self.assertEqual(conn.udi['description'], "Nexus9000 C9508 (8 Slot) Chassis",
"Wrong Description: {}".format(conn.udi['description']))
self.assertEqual(conn.udi['pid'], "N9K-C9508", "Wrong PID: {}".format(conn.udi['pid']))
self.assertEqual(conn.udi['vid'], "V01", "Wrong VID: {}".format(conn.udi['vid']))
self.assertEqual(conn.udi['sn'], "FGE18210BQR", "Wrong S/N: {}".format(conn.udi['sn']))
self.assertEqual(conn.prompt, "switch#", "Wrong Prompt: {}".format(conn.prompt))
self.assertEqual(conn.is_console, True, "Console connection not detected")
with self.assertRaises(condoor.CommandSyntaxError):
conn.send("wrongcommand")
conn.disconnect()
def test_NX9K_2_connection_wrong_user(self):
urls = ["telnet://root:[email protected]:10024"]
self.conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionAuthenticationError):
self.conn.connect(self.logfile_condoor)
self.conn.disconnect()
def test_NX9K_3_connection_refused(self):
urls = ["telnet://admin:[email protected]:10023"]
self.conn = condoor.Connection("host", urls, log_session=self.log_session, log_level=self.log_level)
with self.assertRaises(condoor.ConnectionError):
self.conn.connect(self.logfile_condoor)
self.conn.disconnect()
if __name__ == '__main__':
from unittest import main
main()
| apache-2.0 |
batxes/4Cin | Six_mouse_models/Six_mouse_models_final_output_0.2_-0.1_11000/Six_mouse_models11017.py | 2 | 18199 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((2341.03, 8104.14, 5911.98), (0, 1, 0), 846)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((458.396, 8471.16, 3990.37), (0.7, 0.7, 0.7), 846)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1189.38, 9245.09, 5514.78), (0.7, 0.7, 0.7), 846)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((1513.52, 8414.94, 5604.68), (0.7, 0.7, 0.7), 846)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2299.35, 8015.85, 5348.47), (0.7, 0.7, 0.7), 846)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1734, 7739.12, 6562.51), (0.7, 0.7, 0.7), 846)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((-376.983, 6950.52, 6678.39), (0.7, 0.7, 0.7), 846)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((1614.32, 7908.38, 7547.19), (0.7, 0.7, 0.7), 846)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((2001.8, 7482.27, 7578.19), (0.7, 0.7, 0.7), 846)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2150.71, 6502.24, 8514.84), (0.7, 0.7, 0.7), 846)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2179, 8418.04, 9058.54), (0, 1, 0), 846)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((3260.35, 9243.26, 7405.03), (0.7, 0.7, 0.7), 846)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((2229.74, 8726.14, 8964.98), (0.7, 0.7, 0.7), 846)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2880.47, 7165.73, 8236.52), (0.7, 0.7, 0.7), 846)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2889.72, 8546.92, 8225.55), (0.7, 0.7, 0.7), 846)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((4152.36, 10143.3, 8152.57), (0.7, 0.7, 0.7), 846)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((3931.68, 8728.25, 9434), (0.7, 0.7, 0.7), 846)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3883.31, 8269.51, 9078.75), (0.7, 0.7, 0.7), 846)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4120.14, 8507.79, 7357.32), (0.7, 0.7, 0.7), 846)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((3893.54, 7339.3, 8790.27), (0.7, 0.7, 0.7), 846)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((4207.11, 8527.97, 7820.4), (0, 1, 0), 846)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((3361.73, 6994.33, 7422.54), (0.7, 0.7, 0.7), 846)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((4403.9, 8602.39, 6161.91), (0.7, 0.7, 0.7), 846)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((4564.74, 8354.3, 6583.76), (0.7, 0.7, 0.7), 846)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((4171.51, 6094.16, 7420.32), (0.7, 0.7, 0.7), 846)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((4706.46, 7205.81, 5741.07), (0.7, 0.7, 0.7), 846)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4886.42, 9014.31, 5404.75), (0.7, 0.7, 0.7), 846)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((4683.12, 8040.8, 6340.05), (0.7, 0.7, 0.7), 846)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((4486.86, 7586.72, 8063.8), (0.7, 0.7, 0.7), 846)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((4109.68, 7085.17, 7484.61), (0.7, 0.7, 0.7), 846)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((5500.71, 7395.62, 7047.45), (0, 1, 0), 846)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((6146.14, 7193.8, 5547.86), (0.7, 0.7, 0.7), 846)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((5826.39, 7312.38, 5876.95), (0.7, 0.7, 0.7), 846)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((5095.11, 7455.36, 5385.1), (0.7, 0.7, 0.7), 846)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((4189.32, 6435.7, 5978.3), (0.7, 0.7, 0.7), 846)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((5458.05, 6244.49, 5054.49), (0.7, 0.7, 0.7), 846)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((3929.19, 4727.62, 6940.09), (1, 0.7, 0), 846)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((4591.71, 5968.21, 4648.2), (0.7, 0.7, 0.7), 846)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((5492.36, 5375.45, 5706.04), (0.7, 0.7, 0.7), 846)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((6628.18, 5901.57, 3651.19), (1, 0.7, 0), 846)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((6749.51, 4773.86, 5445.71), (0.7, 0.7, 0.7), 846)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((8041.88, 4461.1, 4804.61), (0.7, 0.7, 0.7), 846)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((7825.55, 5094.32, 4493.92), (0.7, 0.7, 0.7), 846)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((7621.13, 4630.73, 4568), (0.7, 0.7, 0.7), 846)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((8219.07, 5448.65, 4058.66), (0.7, 0.7, 0.7), 846)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7549.4, 5044.92, 4156.86), (0.7, 0.7, 0.7), 846)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((8624.46, 4565.11, 4401.19), (0.7, 0.7, 0.7), 846)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((8647.51, 4548.53, 3300), (0.7, 0.7, 0.7), 846)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((8544.24, 4050.13, 3430.64), (0.7, 0.7, 0.7), 846)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((6900.15, 3307.04, 4095.71), (0.7, 0.7, 0.7), 846)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((7797.83, 4786, 4429.86), (0.7, 0.7, 0.7), 846)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((6735.92, 3456.59, 3610.55), (0, 1, 0), 846)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((9461.52, 3847.65, 5660.97), (0.7, 0.7, 0.7), 846)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((7896.85, 4467.34, 3954.35), (0.7, 0.7, 0.7), 846)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((7706.43, 4094.66, 2800.04), (0.7, 0.7, 0.7), 846)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((8093.28, 4483.34, 4395.81), (0.7, 0.7, 0.7), 846)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((6857.04, 4201.58, 4834.23), (0.7, 0.7, 0.7), 846)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((8234.89, 4288.4, 3257.79), (0.7, 0.7, 0.7), 846)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((7865.16, 3798.47, 4913.25), (0.7, 0.7, 0.7), 846)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((6969.8, 3942.35, 3264.21), (0.7, 0.7, 0.7), 846)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((8328.03, 2803.24, 2955.65), (0.7, 0.7, 0.7), 846)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((6628.65, 2167.18, 3887.88), (0, 1, 0), 846)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((5345.66, 1618.14, 5553.63), (0.7, 0.7, 0.7), 846)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((7134.4, 2059.69, 4803.17), (0.7, 0.7, 0.7), 846)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((9331.87, 2811.18, 4637.18), (0.7, 0.7, 0.7), 846)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((8307.08, 1306.84, 3989.13), (0.7, 0.7, 0.7), 846)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((9261.38, 1872.95, 4172.66), (0.7, 0.7, 0.7), 846)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((8786.63, 2610.2, 5644.2), (0.7, 0.7, 0.7), 846)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((9084.73, 772.816, 6354.89), (0.7, 0.7, 0.7), 846)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((8847.88, 1725.15, 5996.77), (0.7, 0.7, 0.7), 846)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((8981.06, 366.328, 4610.46), (0.7, 0.7, 0.7), 846)
if "particle_71 geometry" not in marker_sets:
s=new_marker_set('particle_71 geometry')
marker_sets["particle_71 geometry"]=s
s= marker_sets["particle_71 geometry"]
mark=s.place_marker((8232.71, 1942.89, 5890.07), (0, 1, 0), 846)
if "particle_72 geometry" not in marker_sets:
s=new_marker_set('particle_72 geometry')
marker_sets["particle_72 geometry"]=s
s= marker_sets["particle_72 geometry"]
mark=s.place_marker((7851.52, 686.087, 5275.54), (0.7, 0.7, 0.7), 846)
if "particle_73 geometry" not in marker_sets:
s=new_marker_set('particle_73 geometry')
marker_sets["particle_73 geometry"]=s
s= marker_sets["particle_73 geometry"]
mark=s.place_marker((9239.79, 1479.03, 4764.37), (0.7, 0.7, 0.7), 846)
if "particle_74 geometry" not in marker_sets:
s=new_marker_set('particle_74 geometry')
marker_sets["particle_74 geometry"]=s
s= marker_sets["particle_74 geometry"]
mark=s.place_marker((7590.76, 828.243, 5653.13), (0, 1, 0), 846)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 |
kaolalotree/shadowsocks | shadowsocks/encrypt.py | 990 | 5180 | #!/usr/bin/env python
#
# Copyright 2012-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import sys
import hashlib
import logging
from shadowsocks import common
from shadowsocks.crypto import rc4_md5, openssl, sodium, table
method_supported = {}
method_supported.update(rc4_md5.ciphers)
method_supported.update(openssl.ciphers)
method_supported.update(sodium.ciphers)
method_supported.update(table.ciphers)
def random_string(length):
return os.urandom(length)
cached_keys = {}
def try_cipher(key, method=None):
Encryptor(key, method)
def EVP_BytesToKey(password, key_len, iv_len):
# equivalent to OpenSSL's EVP_BytesToKey() with count 1
# so that we make the same key and iv as nodejs version
cached_key = '%s-%d-%d' % (password, key_len, iv_len)
r = cached_keys.get(cached_key, None)
if r:
return r
m = []
i = 0
while len(b''.join(m)) < (key_len + iv_len):
md5 = hashlib.md5()
data = password
if i > 0:
data = m[i - 1] + password
md5.update(data)
m.append(md5.digest())
i += 1
ms = b''.join(m)
key = ms[:key_len]
iv = ms[key_len:key_len + iv_len]
cached_keys[cached_key] = (key, iv)
return key, iv
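# Illustrative sketch (not part of upstream shadowsocks): deriving a key/IV
# pair the same way the ciphers below do.  The password and the 32/16 sizes
# (matching 'aes-256-cfb' in method_supported) are made-up example values.
def _example_key_derivation():
    key, iv = EVP_BytesToKey(b'example-password', 32, 16)
    assert len(key) == 32 and len(iv) == 16
    return key, iv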
class Encryptor(object):
def __init__(self, key, method):
self.key = key
self.method = method
self.iv = None
self.iv_sent = False
self.cipher_iv = b''
self.decipher = None
method = method.lower()
self._method_info = self.get_method_info(method)
if self._method_info:
self.cipher = self.get_cipher(key, method, 1,
random_string(self._method_info[1]))
else:
logging.error('method %s not supported' % method)
sys.exit(1)
def get_method_info(self, method):
method = method.lower()
m = method_supported.get(method)
return m
def iv_len(self):
return len(self.cipher_iv)
def get_cipher(self, password, method, op, iv):
password = common.to_bytes(password)
m = self._method_info
if m[0] > 0:
key, iv_ = EVP_BytesToKey(password, m[0], m[1])
else:
# key_length == 0 indicates we should use the key directly
key, iv = password, b''
iv = iv[:m[1]]
if op == 1:
# this iv is for cipher not decipher
self.cipher_iv = iv[:m[1]]
return m[2](method, key, iv, op)
def encrypt(self, buf):
if len(buf) == 0:
return buf
if self.iv_sent:
return self.cipher.update(buf)
else:
self.iv_sent = True
return self.cipher_iv + self.cipher.update(buf)
def decrypt(self, buf):
if len(buf) == 0:
return buf
if self.decipher is None:
decipher_iv_len = self._method_info[1]
decipher_iv = buf[:decipher_iv_len]
self.decipher = self.get_cipher(self.key, self.method, 0,
iv=decipher_iv)
buf = buf[decipher_iv_len:]
if len(buf) == 0:
return buf
return self.decipher.update(buf)
def encrypt_all(password, method, op, data):
result = []
method = method.lower()
(key_len, iv_len, m) = method_supported[method]
if key_len > 0:
key, _ = EVP_BytesToKey(password, key_len, iv_len)
else:
key = password
if op:
iv = random_string(iv_len)
result.append(iv)
else:
iv = data[:iv_len]
data = data[iv_len:]
cipher = m(method, key, iv, op)
result.append(cipher.update(data))
return b''.join(result)
CIPHERS_TO_TEST = [
'aes-128-cfb',
'aes-256-cfb',
'rc4-md5',
'salsa20',
'chacha20',
'table',
]
def test_encryptor():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
encryptor = Encryptor(b'key', method)
decryptor = Encryptor(b'key', method)
cipher = encryptor.encrypt(plain)
plain2 = decryptor.decrypt(cipher)
assert plain == plain2
def test_encrypt_all():
from os import urandom
plain = urandom(10240)
for method in CIPHERS_TO_TEST:
logging.warn(method)
cipher = encrypt_all(b'key', method, 1, plain)
plain2 = encrypt_all(b'key', method, 0, cipher)
assert plain == plain2
if __name__ == '__main__':
test_encrypt_all()
test_encryptor()
| apache-2.0 |
ppapadeas/wprevents | vendor-local/lib/python/dateutil/tzwin.py | 227 | 5737 | # This code was originally contributed by Jeffrey Harris.
import datetime
import struct
import winreg
__all__ = ["tzwin", "tzwinlocal"]
ONEWEEK = datetime.timedelta(7)
TZKEYNAMENT = r"SOFTWARE\Microsoft\Windows NT\CurrentVersion\Time Zones"
TZKEYNAME9X = r"SOFTWARE\Microsoft\Windows\CurrentVersion\Time Zones"
TZLOCALKEYNAME = r"SYSTEM\CurrentControlSet\Control\TimeZoneInformation"
def _settzkeyname():
global TZKEYNAME
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
try:
winreg.OpenKey(handle, TZKEYNAMENT).Close()
TZKEYNAME = TZKEYNAMENT
except WindowsError:
TZKEYNAME = TZKEYNAME9X
handle.Close()
_settzkeyname()
class tzwinbase(datetime.tzinfo):
"""tzinfo class based on win32's timezones available in the registry."""
def utcoffset(self, dt):
if self._isdst(dt):
return datetime.timedelta(minutes=self._dstoffset)
else:
return datetime.timedelta(minutes=self._stdoffset)
def dst(self, dt):
if self._isdst(dt):
minutes = self._dstoffset - self._stdoffset
return datetime.timedelta(minutes=minutes)
else:
return datetime.timedelta(0)
def tzname(self, dt):
if self._isdst(dt):
return self._dstname
else:
return self._stdname
def list():
"""Return a list of all time zones known to the system."""
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, TZKEYNAME)
result = [winreg.EnumKey(tzkey, i)
for i in range(winreg.QueryInfoKey(tzkey)[0])]
tzkey.Close()
handle.Close()
return result
list = staticmethod(list)
def display(self):
return self._display
def _isdst(self, dt):
dston = picknthweekday(dt.year, self._dstmonth, self._dstdayofweek,
self._dsthour, self._dstminute,
self._dstweeknumber)
dstoff = picknthweekday(dt.year, self._stdmonth, self._stddayofweek,
self._stdhour, self._stdminute,
self._stdweeknumber)
if dston < dstoff:
return dston <= dt.replace(tzinfo=None) < dstoff
else:
return not dstoff <= dt.replace(tzinfo=None) < dston
class tzwin(tzwinbase):
def __init__(self, name):
self._name = name
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzkey = winreg.OpenKey(handle, "%s\%s" % (TZKEYNAME, name))
keydict = valuestodict(tzkey)
tzkey.Close()
handle.Close()
self._stdname = keydict["Std"].encode("iso-8859-1")
self._dstname = keydict["Dlt"].encode("iso-8859-1")
self._display = keydict["Display"]
        # See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=3l16h", keydict["TZI"])
self._stdoffset = -tup[0]-tup[1] # Bias + StandardBias * -1
self._dstoffset = self._stdoffset-tup[2] # + DaylightBias * -1
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[4:9]
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[12:17]
def __repr__(self):
return "tzwin(%s)" % repr(self._name)
def __reduce__(self):
return (self.__class__, (self._name,))
class tzwinlocal(tzwinbase):
def __init__(self):
handle = winreg.ConnectRegistry(None, winreg.HKEY_LOCAL_MACHINE)
tzlocalkey = winreg.OpenKey(handle, TZLOCALKEYNAME)
keydict = valuestodict(tzlocalkey)
tzlocalkey.Close()
self._stdname = keydict["StandardName"].encode("iso-8859-1")
self._dstname = keydict["DaylightName"].encode("iso-8859-1")
try:
tzkey = winreg.OpenKey(handle, "%s\%s"%(TZKEYNAME, self._stdname))
_keydict = valuestodict(tzkey)
self._display = _keydict["Display"]
tzkey.Close()
except OSError:
self._display = None
handle.Close()
self._stdoffset = -keydict["Bias"]-keydict["StandardBias"]
self._dstoffset = self._stdoffset-keydict["DaylightBias"]
        # See http://ww1.jsiinc.com/SUBA/tip0300/rh0398.htm
tup = struct.unpack("=8h", keydict["StandardStart"])
(self._stdmonth,
self._stddayofweek, # Sunday = 0
self._stdweeknumber, # Last = 5
self._stdhour,
self._stdminute) = tup[1:6]
tup = struct.unpack("=8h", keydict["DaylightStart"])
(self._dstmonth,
self._dstdayofweek, # Sunday = 0
self._dstweeknumber, # Last = 5
self._dsthour,
self._dstminute) = tup[1:6]
def __reduce__(self):
return (self.__class__, ())
def picknthweekday(year, month, dayofweek, hour, minute, whichweek):
"""dayofweek == 0 means Sunday, whichweek 5 means last instance"""
first = datetime.datetime(year, month, 1, hour, minute)
weekdayone = first.replace(day=((dayofweek-first.isoweekday())%7+1))
for n in range(whichweek):
dt = weekdayone+(whichweek-n)*ONEWEEK
if dt.month == month:
return dt
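# Illustrative sketch (not part of the original dateutil module): per the
# docstring above, whichweek=5 means "last instance", so this returns the last
# Sunday of March 2012 at 02:00, i.e. datetime.datetime(2012, 3, 25, 2, 0).
def _example_last_sunday():
    return picknthweekday(2012, 3, 0, 2, 0, 5)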
def valuestodict(key):
"""Convert a registry key's values to a dictionary."""
dict = {}
size = winreg.QueryInfoKey(key)[1]
for i in range(size):
data = winreg.EnumValue(key, i)
dict[data[0]] = data[1]
return dict
| bsd-3-clause |
ricardogsilva/QGIS | python/plugins/processing/gui/RenderingStyles.py | 30 | 2638 | # -*- coding: utf-8 -*-
"""
***************************************************************************
RenderingStyles.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import os
from processing.tools.system import userFolder
class RenderingStyles:
styles = {}
@staticmethod
def addAlgStylesAndSave(algname, styles):
RenderingStyles.styles[algname] = styles
RenderingStyles.saveSettings()
@staticmethod
def configFile():
return os.path.join(userFolder(), 'processing_qgis_styles.conf')
@staticmethod
def loadStyles():
if not os.path.isfile(RenderingStyles.configFile()):
return
with open(RenderingStyles.configFile()) as lines:
line = lines.readline().strip('\n')
while line != '':
tokens = line.split('|')
if tokens[0] in list(RenderingStyles.styles.keys()):
RenderingStyles.styles[tokens[0]][tokens[1]] = tokens[2]
else:
alg = {}
alg[tokens[1]] = tokens[2]
RenderingStyles.styles[tokens[0]] = alg
line = lines.readline().strip('\n')
@staticmethod
def saveSettings():
with open(RenderingStyles.configFile(), 'w') as fout:
for alg in list(RenderingStyles.styles.keys()):
for out in list(RenderingStyles.styles[alg].keys()):
fout.write(alg + '|' + out + '|' +
RenderingStyles.styles[alg][out] + '\n')
@staticmethod
def getStyle(algname, outputname):
if algname in RenderingStyles.styles:
if outputname in RenderingStyles.styles[algname]:
return RenderingStyles.styles[algname][outputname]
return None
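# Illustrative usage sketch (not part of the QGIS sources): the algorithm name
# and style path below are made-up examples of how the registry is driven.
def _example_rendering_styles():
    RenderingStyles.loadStyles()
    RenderingStyles.addAlgStylesAndSave('qgis:buffer', {'OUTPUT': '/tmp/buffer_style.qml'})
    return RenderingStyles.getStyle('qgis:buffer', 'OUTPUT')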
| gpl-2.0 |
B-MOOC/edx-platform | cms/djangoapps/contentstore/features/transcripts.py | 46 | 8895 | # disable missing docstring
# pylint: disable=missing-docstring
import os
from lettuce import world, step
from django.conf import settings
from xmodule.contentstore.content import StaticContent
from xmodule.contentstore.django import contentstore
from xmodule.exceptions import NotFoundError
from splinter.request_handler.request_handler import RequestHandler
TEST_ROOT = settings.COMMON_TEST_DATA_ROOT
# We should wait 300 ms for event handler invocation + 200ms for safety.
DELAY = 0.5
ERROR_MESSAGES = {
'url_format': u'Incorrect url format.',
'file_type': u'Link types should be unique.',
'links_duplication': u'Links should be unique.',
}
STATUSES = {
'found': u'Timed Transcript Found',
'not found on edx': u'No EdX Timed Transcript',
'not found': u'No Timed Transcript',
'replace': u'Timed Transcript Conflict',
'uploaded_successfully': u'Timed Transcript Uploaded Successfully',
'use existing': u'Confirm Timed Transcript',
}
SELECTORS = {
'error_bar': '.transcripts-error-message',
'url_inputs': '.videolist-settings-item input.input',
'collapse_link': '.collapse-action.collapse-setting',
'collapse_bar': '.videolist-extra-videos',
'status_bar': '.transcripts-message-status',
}
# button type , button css selector, button message
TRANSCRIPTS_BUTTONS = {
'import': ('.setting-import', 'Import YouTube Transcript'),
'download_to_edit': ('.setting-download', 'Download Transcript for Editing'),
'disabled_download_to_edit': ('.setting-download.is-disabled', 'Download Transcript for Editing'),
'upload_new_timed_transcripts': ('.setting-upload', 'Upload New Transcript'),
'replace': ('.setting-replace', 'Yes, replace the edX transcript with the YouTube transcript'),
'choose': ('.setting-choose', 'Timed Transcript from {}'),
'use_existing': ('.setting-use-existing', 'Use Current Transcript'),
}
@step('I clear fields$')
def clear_fields(_step):
# Clear the input fields and trigger an 'input' event
script = """
$('{selector}')
.prop('disabled', false)
.removeClass('is-disabled')
.attr('aria-disabled', false)
.val('')
.trigger('input');
""".format(selector=SELECTORS['url_inputs'])
world.browser.execute_script(script)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I clear field number (.+)$')
def clear_field(_step, index):
index = int(index) - 1
world.css_fill(SELECTORS['url_inputs'], '', index)
# For some reason ChromeDriver doesn't trigger an 'input' event after filling
# the field with an empty value. That's why we trigger it manually via jQuery.
world.trigger_event(SELECTORS['url_inputs'], event='input', index=index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I expect (.+) inputs are disabled$')
def inputs_are_disabled(_step, indexes):
index_list = [int(i.strip()) - 1 for i in indexes.split(',')]
for index in index_list:
el = world.css_find(SELECTORS['url_inputs'])[index]
assert el['disabled']
@step('I expect inputs are enabled$')
def inputs_are_enabled(_step):
for index in range(3):
el = world.css_find(SELECTORS['url_inputs'])[index]
assert not el['disabled']
@step('I do not see error message$')
def i_do_not_see_error_message(_step):
assert not world.css_visible(SELECTORS['error_bar'])
@step('I see error message "([^"]*)"$')
def i_see_error_message(_step, error):
assert world.css_has_text(SELECTORS['error_bar'], ERROR_MESSAGES[error])
@step('I do not see status message$')
def i_do_not_see_status_message(_step):
assert not world.css_visible(SELECTORS['status_bar'])
@step('I see status message "([^"]*)"$')
def i_see_status_message(_step, status):
assert not world.css_visible(SELECTORS['error_bar'])
assert world.css_has_text(SELECTORS['status_bar'], STATUSES[status])
DOWNLOAD_BUTTON = TRANSCRIPTS_BUTTONS["download_to_edit"][0]
if world.is_css_present(DOWNLOAD_BUTTON, wait_time=1) and not world.css_find(DOWNLOAD_BUTTON)[0].has_class('is-disabled'):
assert _transcripts_are_downloaded()
@step('I (.*)see button "([^"]*)"$')
def i_see_button(_step, not_see, button_type):
button = button_type.strip()
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1])
@step('I (.*)see (.*)button "([^"]*)" number (\d+)$')
def i_see_button_with_custom_text(_step, not_see, button_type, custom_text, index):
button = button_type.strip()
custom_text = custom_text.strip()
index = int(index.strip()) - 1
if not_see.strip():
assert world.is_css_not_present(TRANSCRIPTS_BUTTONS[button][0])
else:
assert world.css_has_text(TRANSCRIPTS_BUTTONS[button][0], TRANSCRIPTS_BUTTONS[button][1].format(custom_text), index)
@step('I click transcript button "([^"]*)"$')
def click_button_transcripts_variant(_step, button_type):
button = button_type.strip()
world.css_click(TRANSCRIPTS_BUTTONS[button][0])
world.wait_for_ajax_complete()
@step('I click transcript button "([^"]*)" number (\d+)$')
def click_button_index(_step, button_type, index):
button = button_type.strip()
index = int(index.strip()) - 1
world.css_click(TRANSCRIPTS_BUTTONS[button][0], index)
world.wait_for_ajax_complete()
@step('I remove "([^"]+)" transcripts id from store')
def remove_transcripts_from_store(_step, subs_id):
"""Remove from store, if transcripts content exists."""
filename = 'subs_{0}.srt.sjson'.format(subs_id.strip())
content_location = StaticContent.compute_location(
world.scenario_dict['COURSE'].id,
filename
)
try:
content = contentstore().find(content_location)
contentstore().delete(content.location)
print('Transcript file was removed from store.')
except NotFoundError:
print('Transcript file was NOT found and not removed.')
@step('I enter a "([^"]+)" source to field number (\d+)$')
def i_enter_a_source(_step, link, index):
index = int(index) - 1
    if index != 0 and not world.css_visible(SELECTORS['collapse_bar']):
world.css_click(SELECTORS['collapse_link'])
assert world.css_visible(SELECTORS['collapse_bar'])
world.css_fill(SELECTORS['url_inputs'], link, index)
world.wait(DELAY)
world.wait_for_ajax_complete()
@step('I upload the transcripts file "([^"]*)"$')
def upload_file(_step, file_name):
path = os.path.join(TEST_ROOT, 'uploads/', file_name.strip())
world.browser.execute_script("$('form.file-chooser').show()")
world.browser.attach_file('transcript-file', os.path.abspath(path))
world.wait_for_ajax_complete()
@step('I see "([^"]*)" text in the captions')
def check_text_in_the_captions(_step, text):
world.wait_for_present('.video.is-captions-rendered')
world.wait_for(lambda _: world.css_text('.subtitles'), timeout=30)
actual_text = world.css_text('.subtitles')
assert (text in actual_text)
@step('I see value "([^"]*)" in the field "([^"]*)"$')
def check_transcripts_field(_step, values, field_name):
world.select_editor_tab('Advanced')
tab = world.css_find('#settings-tab').first
field_id = '#' + tab.find_by_xpath('.//label[text()="%s"]' % field_name.strip())[0]['for']
values_list = [i.strip() == world.css_value(field_id) for i in values.split('|')]
assert any(values_list)
world.select_editor_tab('Basic')
@step('I save changes$')
def save_changes(_step):
world.save_component()
@step('I open tab "([^"]*)"$')
def open_tab(_step, tab_name):
world.select_editor_tab(tab_name)
@step('I set value "([^"]*)" to the field "([^"]*)"$')
def set_value_transcripts_field(_step, value, field_name):
tab = world.css_find('#settings-tab').first
XPATH = './/label[text()="{name}"]'.format(name=field_name)
SELECTOR = '#' + tab.find_by_xpath(XPATH)[0]['for']
element = world.css_find(SELECTOR).first
if element['type'] == 'text':
SCRIPT = '$("{selector}").val("{value}").change()'.format(
selector=SELECTOR,
value=value
)
world.browser.execute_script(SCRIPT)
assert world.css_has_value(SELECTOR, value)
else:
assert False, 'Incorrect element type.'
world.wait_for_ajax_complete()
@step('I revert the transcript field "([^"]*)"$')
def revert_transcripts_field(_step, field_name):
world.revert_setting_entry(field_name)
def _transcripts_are_downloaded():
world.wait_for_ajax_complete()
request = RequestHandler()
DOWNLOAD_BUTTON = world.css_find(TRANSCRIPTS_BUTTONS["download_to_edit"][0]).first
url = DOWNLOAD_BUTTON['href']
request.connect(url)
return request.status_code.is_success()
| agpl-3.0 |
dhuang/incubator-airflow | airflow/contrib/hooks/qubole_hook.py | 11 | 8457 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import time
import datetime
import six
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow import configuration
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.state import State
from qds_sdk.qubole import Qubole
from qds_sdk.commands import Command, HiveCommand, PrestoCommand, HadoopCommand, \
PigCommand, ShellCommand, SparkCommand, DbTapQueryCommand, DbExportCommand, \
DbImportCommand
COMMAND_CLASSES = {
"hivecmd": HiveCommand,
"prestocmd": PrestoCommand,
"hadoopcmd": HadoopCommand,
"shellcmd": ShellCommand,
"pigcmd": PigCommand,
"sparkcmd": SparkCommand,
"dbtapquerycmd": DbTapQueryCommand,
"dbexportcmd": DbExportCommand,
"dbimportcmd": DbImportCommand
}
HYPHEN_ARGS = ['cluster_label', 'app_id', 'note_id']
POSITIONAL_ARGS = ['sub_command', 'parameters']
COMMAND_ARGS = {
"hivecmd": ['query', 'script_location', 'macros', 'tags', 'sample_size',
'cluster_label', 'name'],
'prestocmd': ['query', 'script_location', 'macros', 'tags', 'cluster_label', 'name'],
'hadoopcmd': ['sub_command', 'tags', 'cluster_label', 'name'],
'shellcmd': ['script', 'script_location', 'files', 'archives', 'parameters', 'tags',
'cluster_label', 'name'],
'pigcmd': ['script', 'script_location', 'parameters', 'tags', 'cluster_label',
'name'],
'dbtapquerycmd': ['db_tap_id', 'query', 'macros', 'tags', 'name'],
'sparkcmd': ['program', 'cmdline', 'sql', 'script_location', 'macros', 'tags',
'cluster_label', 'language', 'app_id', 'name', 'arguments', 'note_id',
'user_program_arguments'],
'dbexportcmd': ['mode', 'hive_table', 'partition_spec', 'dbtap_id', 'db_table',
'db_update_mode', 'db_update_keys', 'export_dir',
'fields_terminated_by', 'tags', 'name'],
'dbimportcmd': ['mode', 'hive_table', 'dbtap_id', 'db_table', 'where_clause',
'parallelism', 'extract_query', 'boundary_query', 'split_column',
'tags', 'name']
}
class QuboleHook(BaseHook, LoggingMixin):
def __init__(self, *args, **kwargs):
conn = self.get_connection(kwargs['qubole_conn_id'])
Qubole.configure(api_token=conn.password, api_url=conn.host)
self.task_id = kwargs['task_id']
self.dag_id = kwargs['dag'].dag_id
self.kwargs = kwargs
self.cls = COMMAND_CLASSES[self.kwargs['command_type']]
self.cmd = None
@staticmethod
def handle_failure_retry(context):
ti = context['ti']
cmd_id = ti.xcom_pull(key='qbol_cmd_id', task_ids=ti.task_id)
if cmd_id is not None:
cmd = Command.find(cmd_id)
if cmd is not None:
log = LoggingMixin().log
if cmd.status == 'done':
log.info('Command ID: %s has been succeeded, hence marking this '
'TI as Success.', cmd_id)
ti.state = State.SUCCESS
elif cmd.status == 'running':
log.info('Cancelling the Qubole Command Id: %s', cmd_id)
cmd.cancel()
def execute(self, context):
args = self.cls.parse(self.create_cmd_args(context))
self.cmd = self.cls.create(**args)
context['task_instance'].xcom_push(key='qbol_cmd_id', value=self.cmd.id)
self.log.info(
"Qubole command created with Id: %s and Status: %s",
self.cmd.id, self.cmd.status
)
while not Command.is_done(self.cmd.status):
time.sleep(Qubole.poll_interval)
self.cmd = self.cls.find(self.cmd.id)
self.log.info("Command Id: %s and Status: %s", self.cmd.id, self.cmd.status)
if 'fetch_logs' in self.kwargs and self.kwargs['fetch_logs'] is True:
self.log.info("Logs for Command Id: %s \n%s", self.cmd.id, self.cmd.get_log())
if self.cmd.status != 'done':
raise AirflowException('Command Id: {0} failed with Status: {1}'.format(
self.cmd.id, self.cmd.status))
def kill(self, ti):
"""
Kill (cancel) a Qubole command
:param ti: Task Instance of the DAG, used to determine the Qubole command id
:return: response from Qubole
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=ti.task_id)
self.cmd = self.cls.find(cmd_id)
if self.cls and self.cmd:
self.log.info('Sending KILL signal to Qubole Command Id: %s', self.cmd.id)
self.cmd.cancel()
def get_results(self, ti=None, fp=None, inline=True, delim=None, fetch=True):
"""
Get results (or just s3 locations) of a command from Qubole and save into a file
:param ti: Task Instance of the DAG, used to determine the Qubole command id
:param fp: Optional file pointer, will create one and return if None passed
:param inline: True to download actual results, False to get s3 locations only
:param delim: Replaces the CTL-A chars with the given delim, defaults to ','
:param fetch: when inline is True, get results directly from s3 (if large)
:return: file location containing actual results or s3 locations of results
"""
if fp is None:
iso = datetime.datetime.utcnow().isoformat()
logpath = os.path.expanduser(configuration.get('core', 'BASE_LOG_FOLDER'))
resultpath = logpath + '/' + self.dag_id + '/' + self.task_id + '/results'
configuration.mkdir_p(resultpath)
fp = open(resultpath + '/' + iso, 'wb')
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
self.cmd = self.cls.find(cmd_id)
self.cmd.get_results(fp, inline, delim, fetch)
fp.flush()
fp.close()
return fp.name
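# A minimal usage sketch for this hook (hypothetical task/DAG names; in practice the
# hook is usually constructed by the corresponding Qubole operator rather than by hand):
#
#   hook = QuboleHook(task_id='run_hive_query', dag=dag, command_type='hivecmd',
#                     qubole_conn_id='qubole_default', query='SHOW TABLES')
#   hook.execute(context)                      # submits the command and polls until done
#   path = hook.get_results(ti=context['ti'],  # writes results under BASE_LOG_FOLDER
#                           inline=True, delim=',')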
def get_log(self, ti):
"""
Get logs of a command from Qubole
:param ti: Task Instance of the DAG, used to determine the Qubole command id
:return: command log as text
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_log_id(self.cls, cmd_id)
def get_jobs_id(self, ti):
"""
Get jobs associated with a Qubole command
:param ti: Task Instance of the DAG, used to determine the Qubole command id
:return: Job information associated with the command
"""
if self.cmd is None:
cmd_id = ti.xcom_pull(key="qbol_cmd_id", task_ids=self.task_id)
Command.get_jobs_id(self.cls, cmd_id)
def create_cmd_args(self, context):
args = []
cmd_type = self.kwargs['command_type']
inplace_args = None
tags = set([self.dag_id, self.task_id, context['run_id']])
for k,v in self.kwargs.items():
if k in COMMAND_ARGS[cmd_type]:
if k in HYPHEN_ARGS:
args.append("--{0}={1}".format(k.replace('_', '-'),v))
elif k in POSITIONAL_ARGS:
inplace_args = v
elif k == 'tags':
if isinstance(v, six.string_types):
tags.add(v)
elif isinstance(v, (list, tuple)):
for val in v:
tags.add(val)
else:
args.append("--{0}={1}".format(k,v))
if k == 'notify' and v is True:
args.append("--notify")
args.append("--tags={0}".format(','.join(filter(None,tags))))
if inplace_args is not None:
args += inplace_args.split(' ')
return args
| apache-2.0 |
valiantljk/graph-partition | linalg/tests/test_laplacian.py | 40 | 5461 | from nose import SkipTest
import networkx as nx
from networkx.generators.degree_seq import havel_hakimi_graph
class TestLaplacian(object):
numpy=1 # nosetests attribute, use nosetests -a 'not numpy' to skip test
@classmethod
def setupClass(cls):
global numpy
global scipy
global assert_equal
global assert_almost_equal
try:
import numpy
import scipy
from numpy.testing import assert_equal,assert_almost_equal
except ImportError:
raise SkipTest('NumPy or SciPy not available.')
def setUp(self):
deg=[3,2,2,1,0]
self.G=havel_hakimi_graph(deg)
self.WG=nx.Graph( (u,v,{'weight':0.5,'other':0.3})
for (u,v) in self.G.edges_iter() )
self.WG.add_node(4)
self.MG=nx.MultiGraph(self.G)
# Graph with selfloops
self.Gsl = self.G.copy()
for node in self.Gsl.nodes():
self.Gsl.add_edge(node, node)
def test_laplacian(self):
"Graph Laplacian"
NL=numpy.array([[ 3, -1, -1, -1, 0],
[-1, 2, -1, 0, 0],
[-1, -1, 2, 0, 0],
[-1, 0, 0, 1, 0],
[ 0, 0, 0, 0, 0]])
WL=0.5*NL
OL=0.3*NL
assert_equal(nx.laplacian_matrix(self.G).todense(),NL)
assert_equal(nx.laplacian_matrix(self.MG).todense(),NL)
assert_equal(nx.laplacian_matrix(self.G,nodelist=[0,1]).todense(),
numpy.array([[ 1, -1],[-1, 1]]))
assert_equal(nx.laplacian_matrix(self.WG).todense(),WL)
assert_equal(nx.laplacian_matrix(self.WG,weight=None).todense(),NL)
assert_equal(nx.laplacian_matrix(self.WG,weight='other').todense(),OL)
def test_normalized_laplacian(self):
"Normalized Graph Laplacian"
GL=numpy.array([[ 1.00, -0.408, -0.408, -0.577, 0.00],
[-0.408, 1.00, -0.50, 0.00 , 0.00],
[-0.408, -0.50, 1.00, 0.00, 0.00],
[-0.577, 0.00, 0.00, 1.00, 0.00],
[ 0.00, 0.00, 0.00, 0.00, 0.00]])
Lsl = numpy.array([[ 0.75 , -0.2887, -0.2887, -0.3536, 0.],
[-0.2887, 0.6667, -0.3333, 0. , 0.],
[-0.2887, -0.3333, 0.6667, 0. , 0.],
[-0.3536, 0. , 0. , 0.5 , 0.],
[ 0. , 0. , 0. , 0. , 0.]])
assert_almost_equal(nx.normalized_laplacian_matrix(self.G).todense(),
GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian_matrix(self.MG).todense(),
GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian_matrix(self.WG).todense(),
GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian_matrix(self.WG,weight='other').todense(),
GL,decimal=3)
assert_almost_equal(nx.normalized_laplacian_matrix(self.Gsl).todense(),
Lsl, decimal=3)
def test_directed_laplacian(self):
"Directed Laplacian"
# Graph used as an example in Sec. 4.1 of Langville and Meyer,
# "Google's PageRank and Beyond". The graph contains dangling nodes, so
# the pagerank random walk is selected by directed_laplacian
G = nx.DiGraph()
G.add_edges_from(((1,2), (1,3), (3,1), (3,2), (3,5), (4,5), (4,6),
(5,4), (5,6), (6,4)))
GL = numpy.array([[ 0.9833, -0.2941, -0.3882, -0.0291, -0.0231, -0.0261],
[-0.2941, 0.8333, -0.2339, -0.0536, -0.0589, -0.0554],
[-0.3882, -0.2339, 0.9833, -0.0278, -0.0896, -0.0251],
[-0.0291, -0.0536, -0.0278, 0.9833, -0.4878, -0.6675],
[-0.0231, -0.0589, -0.0896, -0.4878, 0.9833, -0.2078],
[-0.0261, -0.0554, -0.0251, -0.6675, -0.2078, 0.9833]])
assert_almost_equal(nx.directed_laplacian_matrix(G, alpha=0.9), GL, decimal=3)
# Make the graph strongly connected, so we can use a random and lazy walk
G.add_edges_from((((2,5), (6,1))))
GL = numpy.array([[ 1. , -0.3062, -0.4714, 0. , 0. , -0.3227],
[-0.3062, 1. , -0.1443, 0. , -0.3162, 0. ],
[-0.4714, -0.1443, 1. , 0. , -0.0913, 0. ],
[ 0. , 0. , 0. , 1. , -0.5 , -0.5 ],
[ 0. , -0.3162, -0.0913, -0.5 , 1. , -0.25 ],
[-0.3227, 0. , 0. , -0.5 , -0.25 , 1. ]])
assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='random'), GL, decimal=3)
GL = numpy.array([[ 0.5 , -0.1531, -0.2357, 0. , 0. , -0.1614],
[-0.1531, 0.5 , -0.0722, 0. , -0.1581, 0. ],
[-0.2357, -0.0722, 0.5 , 0. , -0.0456, 0. ],
[ 0. , 0. , 0. , 0.5 , -0.25 , -0.25 ],
[ 0. , -0.1581, -0.0456, -0.25 , 0.5 , -0.125 ],
[-0.1614, 0. , 0. , -0.25 , -0.125 , 0.5 ]])
assert_almost_equal(nx.directed_laplacian_matrix(G, walk_type='lazy'), GL, decimal=3)
| gpl-2.0 |
chubbymaggie/pwndbg | pwndbg/commands/ropper.py | 5 | 1298 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import subprocess
import tempfile
import gdb
import pwndbg.commands
import pwndbg.vmmap
parser = argparse.ArgumentParser(description="ROP gadget search with ropper.",
epilog="Example: ropper -- --console; ropper -- --search 'mov e?x'")
parser.add_argument('argument', nargs='*', type=str,
help='Arguments to pass to ropper')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def ropper(argument):
with tempfile.NamedTemporaryFile() as corefile:
# If the process is running, dump a corefile so we get actual addresses.
if pwndbg.proc.alive:
filename = corefile.name
gdb.execute('gcore %s' % filename)
else:
filename = pwndbg.proc.exe
# Build up the command line to run
cmd = ['ropper',
'--file',
filename]
cmd += argument
try:
io = subprocess.call(cmd)
except Exception:
print("Could not run ropper. Please ensure it's installed and in $PATH.")
| mit |
nishad-jobsglobal/odoo-marriot | addons/l10n_br/__init__.py | 430 | 1403 | # -*- encoding: utf-8 -*-
###############################################################################
# #
# Copyright (C) 2009 Renato Lima - Akretion #
# #
#This program is free software: you can redistribute it and/or modify #
#it under the terms of the GNU Affero General Public License as published by #
#the Free Software Foundation, either version 3 of the License, or #
#(at your option) any later version. #
# #
#This program is distributed in the hope that it will be useful, #
#but WITHOUT ANY WARRANTY; without even the implied warranty of #
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
#GNU General Public License for more details. #
# #
#You should have received a copy of the GNU General Public License #
#along with this program. If not, see <http://www.gnu.org/licenses/>. #
###############################################################################
import account
| agpl-3.0 |
ofir123/CouchPotatoServer | libs/suds/xsd/sxbuiltin.py | 193 | 7297 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( [email protected] )
"""
The I{sxbuiltin} module provides classes that represent
XSD I{builtin} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.date import *
from suds.xsd.sxbase import XBuiltin
import datetime as dt
log = getLogger(__name__)
class XString(XBuiltin):
"""
Represents an (xsd) <xs:string/> node
"""
pass
class XAny(XBuiltin):
"""
Represents an (xsd) <any/> node
"""
def __init__(self, schema, name):
XBuiltin.__init__(self, schema, name)
self.nillable = False
def get_child(self, name):
child = XAny(self.schema, name)
return (child, [])
def any(self):
return True
class XBoolean(XBuiltin):
"""
Represents an (xsd) boolean builtin type.
"""
translation = (
{ '1':True,'true':True,'0':False,'false':False },
{ True:'true',1:'true',False:'false',0:'false' },
)
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring):
return XBoolean.translation[0].get(value)
else:
return None
else:
if isinstance(value, (bool,int)):
return XBoolean.translation[1].get(value)
else:
return value
class XInteger(XBuiltin):
"""
Represents an (xsd) xs:int builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return int(value)
else:
return None
else:
if isinstance(value, int):
return str(value)
else:
return value
class XLong(XBuiltin):
"""
Represents an (xsd) xs:long builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return long(value)
else:
return None
else:
if isinstance(value, (int,long)):
return str(value)
else:
return value
class XFloat(XBuiltin):
"""
Represents an (xsd) xs:float builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return float(value)
else:
return None
else:
if isinstance(value, float):
return str(value)
else:
return value
class XDate(XBuiltin):
"""
Represents an (xsd) xs:date builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Date(value).date
else:
return None
else:
if isinstance(value, dt.date):
return str(Date(value))
else:
return value
class XTime(XBuiltin):
"""
Represents an (xsd) xs:time builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Time(value).time
else:
return None
else:
if isinstance(value, dt.date):
return str(Time(value))
else:
return value
class XDateTime(XBuiltin):
"""
Represents an (xsd) xs:datetime builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return DateTime(value).datetime
else:
return None
else:
if isinstance(value, dt.date):
return str(DateTime(value))
else:
return value
class Factory:
tags =\
{
# any
'anyType' : XAny,
# strings
'string' : XString,
'normalizedString' : XString,
'ID' : XString,
'Name' : XString,
'QName' : XString,
'NCName' : XString,
'anySimpleType' : XString,
'anyURI' : XString,
'NOTATION' : XString,
'token' : XString,
'language' : XString,
'IDREFS' : XString,
'ENTITIES' : XString,
'IDREF' : XString,
'ENTITY' : XString,
'NMTOKEN' : XString,
'NMTOKENS' : XString,
# binary
'hexBinary' : XString,
'base64Binary' : XString,
# integers
'int' : XInteger,
'integer' : XInteger,
'unsignedInt' : XInteger,
'positiveInteger' : XInteger,
'negativeInteger' : XInteger,
'nonPositiveInteger' : XInteger,
'nonNegativeInteger' : XInteger,
# longs
'long' : XLong,
'unsignedLong' : XLong,
# shorts
'short' : XInteger,
'unsignedShort' : XInteger,
'byte' : XInteger,
'unsignedByte' : XInteger,
# floats
'float' : XFloat,
'double' : XFloat,
'decimal' : XFloat,
# dates & times
'date' : XDate,
'time' : XTime,
'dateTime': XDateTime,
'duration': XString,
'gYearMonth' : XString,
'gYear' : XString,
'gMonthDay' : XString,
'gDay' : XString,
'gMonth' : XString,
# boolean
'boolean' : XBoolean,
}
@classmethod
def maptag(cls, tag, fn):
"""
Map (override) tag => I{class} mapping.
@param tag: An xsd tag name.
@type tag: str
@param fn: A function or class.
@type fn: fn|class.
"""
cls.tags[tag] = fn
@classmethod
def create(cls, schema, name):
"""
Create an object based on the root tag name.
@param schema: A schema object.
@type schema: L{schema.Schema}
@param name: The name.
@type name: str
@return: The created object.
@rtype: L{XBuiltin}
"""
fn = cls.tags.get(name)
if fn is not None:
return fn(schema, name)
else:
return XBuiltin(schema, name)
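# A minimal usage sketch (the `schema` argument is assumed to be a parsed suds
# schema object; the names below are illustrative only):
#
#   Factory.maptag('decimal', XString)     # override: treat xs:decimal as plain strings
#   xint = Factory.create(schema, 'int')   # -> XInteger instance
#   assert xint.translate('42') == 42      # XML string value -> Python int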
| gpl-3.0 |
mdrumond/tensorflow | tensorflow/python/kernel_tests/reader_dataset_ops_test.py | 4 | 27094 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the experimental input pipeline ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import zlib
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import readers
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.lib.io import python_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.platform import test
from tensorflow.python.util import compat
class TextLineDatasetTest(test.TestCase):
def _lineText(self, f, l):
return compat.as_bytes("%d: %d" % (f, l))
def _createFiles(self,
num_files,
num_lines,
crlf=False,
compression_type=None):
filenames = []
for i in range(num_files):
fn = os.path.join(self.get_temp_dir(), "text_line.%d.txt" % i)
filenames.append(fn)
contents = []
for j in range(num_lines):
contents.append(self._lineText(i, j))
# Always include a newline after the record, except after the last
# record of a file (the first file keeps its trailing newline)
if j + 1 != num_lines or i == 0:
contents.append(b"\r\n" if crlf else b"\n")
contents = b"".join(contents)
if not compression_type:
with open(fn, "wb") as f:
f.write(contents)
elif compression_type == "GZIP":
with gzip.GzipFile(fn, "wb") as f:
f.write(contents)
elif compression_type == "ZLIB":
contents = zlib.compress(contents)
with open(fn, "wb") as f:
f.write(contents)
else:
raise ValueError("Unsupported compression_type", compression_type)
return filenames
def _testTextLineDataset(self, compression_type=None):
test_filenames = self._createFiles(
2, 5, crlf=True, compression_type=compression_type)
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TextLineDataset(
filenames, compression_type=compression_type).repeat(num_epochs)
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(5):
self.assertEqual(self._lineText(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={filenames: test_filenames,
num_epochs: 10,
batch_size: 5})
for _ in range(10):
self.assertAllEqual([self._lineText(0, i) for i in range(5)],
sess.run(get_next))
self.assertAllEqual([self._lineText(1, i) for i in range(5)],
sess.run(get_next))
def testTextLineDatasetNoCompression(self):
self._testTextLineDataset()
def testTextLineDatasetGzipCompression(self):
self._testTextLineDataset(compression_type="GZIP")
def testTextLineDatasetZlibCompression(self):
self._testTextLineDataset(compression_type="ZLIB")
def testTextLineDatasetBuffering(self):
test_filenames = self._createFiles(2, 5, crlf=True)
repeat_dataset = readers.TextLineDataset(test_filenames, buffer_size=10)
iterator = repeat_dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(2):
for i in range(5):
self.assertEqual(self._lineText(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
class FixedLengthRecordReaderTest(test.TestCase):
def setUp(self):
super(FixedLengthRecordReaderTest, self).setUp()
self._num_files = 2
self._num_records = 7
self._header_bytes = 5
self._record_bytes = 3
self._footer_bytes = 2
def _record(self, f, r):
return compat.as_bytes(str(f * 2 + r) * self._record_bytes)
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "fixed_length_record.%d.txt" % i)
filenames.append(fn)
with open(fn, "wb") as f:
f.write(b"H" * self._header_bytes)
for j in range(self._num_records):
f.write(self._record(i, j))
f.write(b"F" * self._footer_bytes)
return filenames
def testFixedLengthRecordDataset(self):
test_filenames = self._createFiles()
filenames = array_ops.placeholder(dtypes.string, shape=[None])
num_epochs = array_ops.placeholder(dtypes.int64, shape=[])
batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
batch_dataset = repeat_dataset.batch(batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
init_op = iterator.make_initializer(repeat_dataset)
init_batch_op = iterator.make_initializer(batch_dataset)
get_next = iterator.get_next()
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
init_op, feed_dict={filenames: [test_filenames[0]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(0, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from file 1.
sess.run(
init_op, feed_dict={filenames: [test_filenames[1]],
num_epochs: 1})
for i in range(self._num_records):
self.assertEqual(self._record(1, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Basic test: read from both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test repeated iteration through both files.
sess.run(init_op, feed_dict={filenames: test_filenames, num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Test batched and repeated iteration through both files.
sess.run(
init_batch_op,
feed_dict={
filenames: test_filenames,
num_epochs: 10,
batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)],
sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testFixedLengthRecordDatasetBuffering(self):
test_filenames = self._createFiles()
dataset = readers.FixedLengthRecordDataset(
test_filenames,
self._record_bytes,
self._header_bytes,
self._footer_bytes,
buffer_size=10)
iterator = dataset.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
def _iterator_checkpoint_path(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _build_iterator_graph(self, num_epochs):
filenames = self._createFiles()
path = self._iterator_checkpoint_path()
dataset = (readers.FixedLengthRecordDataset(
filenames, self._record_bytes, self._header_bytes, self._footer_bytes)
.repeat(num_epochs))
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next_op = iterator.get_next()
save_op = gen_dataset_ops.save_iterator(iterator._iterator_resource, path)
restore_op = gen_dataset_ops.restore_iterator(iterator._iterator_resource,
path)
return init_op, get_next_op, save_op, restore_op
def _restore_iterator(self):
output_types = dtypes.string
output_shapes = tensor_shape.scalar()
iterator = iterator_ops.Iterator.from_structure(output_types, output_shapes)
get_next = iterator.get_next()
restore_op = gen_dataset_ops.restore_iterator(
iterator._iterator_resource, self._iterator_checkpoint_path())
return restore_op, get_next
def testSaveRestore(self):
num_epochs = 10
epoch_break = 5
file_break = self._num_files // 2
record_break = self._num_records // 2
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch == epoch_break and f == file_break and
r == record_break):
sess.run(save_op)
break
self.assertEqual(self._record(f, r), sess.run(get_next_op))
else:
continue
break
else:
continue
break
else:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch < epoch_break or
(epoch == epoch_break and f < file_break) or
(epoch == epoch_break and f == file_break and
r < record_break)):
continue
self.assertEqual(self._record(f, r), sess.run(get_next_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
def testInitThenRestore(self):
# Note: Calling init_op before restore_op is redundant. This test just makes
# sure we do not fail if restore is called on an already initialized
# iterator resource.
num_epochs = 10
epoch_break = 5
file_break = self._num_files // 2
record_break = self._num_records // 2
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch == epoch_break and f == file_break and
r == record_break):
sess.run(save_op)
break
self.assertEqual(self._record(f, r), sess.run(get_next_op))
else:
continue
break
else:
continue
break
else:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch < epoch_break or
(epoch == epoch_break and f < file_break) or
(epoch == epoch_break and f == file_break and
r < record_break)):
continue
self.assertEqual(self._record(f, r), sess.run(get_next_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
def testRestoreInModifiedGraph(self):
num_epochs = 10
num_epochs_1 = 20
epoch_break = 5
file_break = self._num_files // 2
record_break = self._num_records // 2
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch == epoch_break and f == file_break and
r == record_break):
sess.run(save_op)
break
self.assertEqual(self._record(f, r), sess.run(get_next_op))
else:
continue
break
else:
continue
break
else:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs_1)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch < epoch_break or
(epoch == epoch_break and f < file_break) or
(epoch == epoch_break and f == file_break and
r < record_break)):
continue
self.assertEqual(self._record(f, r), sess.run(get_next_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
def testRestoreWithoutBuildingDatasetGraph(self):
num_epochs = 10
epoch_break = 5
file_break = self._num_files // 2
record_break = self._num_records // 2
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch == epoch_break and f == file_break and
r == record_break):
sess.run(save_op)
break
self.assertEqual(self._record(f, r), sess.run(get_next_op))
else:
continue
break
else:
continue
break
else:
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
with ops.Graph().as_default() as g:
restore_op, get_next_op = self._restore_iterator()
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for epoch in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
if (epoch < epoch_break or
(epoch == epoch_break and f < file_break) or
(epoch == epoch_break and f == file_break and
r < record_break)):
continue
self.assertEqual(self._record(f, r), sess.run(get_next_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
def testRestoreUnusedIterator(self):
num_epochs = 10
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
# Save unused iterator.
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
for _ in range(num_epochs * self._num_files * self._num_records):
sess.run(get_next_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
def testRestoreExhaustedIterator(self):
num_epochs = 10
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for _ in range(num_epochs):
for f in range(self._num_files):
for r in range(self._num_records):
self.assertEqual(self._record(f, r), sess.run(get_next_op))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next_op, save_op, restore_op = self._build_iterator_graph(
num_epochs=num_epochs)
with self.test_session(graph=g) as sess:
sess.run(restore_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next_op)
class TFRecordDatasetTest(test.TestCase):
def setUp(self):
super(TFRecordDatasetTest, self).setUp()
self._num_files = 2
self._num_records = 7
self.test_filenames = self._createFiles()
self.filenames = array_ops.placeholder(dtypes.string, shape=[None])
self.num_epochs = array_ops.placeholder_with_default(
constant_op.constant(1, dtypes.int64), shape=[])
self.compression_type = array_ops.placeholder_with_default("", shape=[])
self.batch_size = array_ops.placeholder(dtypes.int64, shape=[])
repeat_dataset = readers.TFRecordDataset(self.filenames,
self.compression_type).repeat(
self.num_epochs)
batch_dataset = repeat_dataset.batch(self.batch_size)
iterator = iterator_ops.Iterator.from_structure(batch_dataset.output_types)
self.init_op = iterator.make_initializer(repeat_dataset)
self.init_batch_op = iterator.make_initializer(batch_dataset)
self.get_next = iterator.get_next()
def _record(self, f, r):
return compat.as_bytes("Record %d of file %d" % (r, f))
def _createFiles(self):
filenames = []
for i in range(self._num_files):
fn = os.path.join(self.get_temp_dir(), "tf_record.%d.txt" % i)
filenames.append(fn)
writer = python_io.TFRecordWriter(fn)
for j in range(self._num_records):
writer.write(self._record(i, j))
writer.close()
return filenames
def testReadOneEpoch(self):
with self.test_session() as sess:
# Basic test: read from file 0.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[0]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(0, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from file 1.
sess.run(
self.init_op,
feed_dict={
self.filenames: [self.test_filenames[1]],
self.num_epochs: 1
})
for i in range(self._num_records):
self.assertAllEqual(self._record(1, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
# Basic test: read from both files.
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 1})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochs(self):
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: self.test_filenames,
self.num_epochs: 10})
for _ in range(10):
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadTenEpochsOfBatches(self):
with self.test_session() as sess:
sess.run(
self.init_batch_op,
feed_dict={
self.filenames: self.test_filenames,
self.num_epochs: 10,
self.batch_size: self._num_records
})
for _ in range(10):
for j in range(self._num_files):
values = sess.run(self.get_next)
self.assertAllEqual(
[self._record(j, i) for i in range(self._num_records)], values)
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadZlibFiles(self):
zlib_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
cdata = zlib.compress(f.read())
zfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.z" % i)
with open(zfn, "wb") as f:
f.write(cdata)
zlib_files.append(zfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: zlib_files,
self.compression_type: "ZLIB"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadGzipFiles(self):
gzip_files = []
for i, fn in enumerate(self.test_filenames):
with open(fn, "rb") as f:
gzfn = os.path.join(self.get_temp_dir(), "tfrecord_%s.gz" % i)
with gzip.GzipFile(gzfn, "wb") as gzf:
gzf.write(f.read())
gzip_files.append(gzfn)
with self.test_session() as sess:
sess.run(
self.init_op,
feed_dict={self.filenames: gzip_files,
self.compression_type: "GZIP"})
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(self.get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(self.get_next)
def testReadWithBuffer(self):
one_mebibyte = 2**20
d = readers.TFRecordDataset(self.test_filenames, buffer_size=one_mebibyte)
iterator = d.make_one_shot_iterator()
with self.test_session() as sess:
for j in range(self._num_files):
for i in range(self._num_records):
self.assertAllEqual(self._record(j, i), sess.run(iterator.get_next()))
with self.assertRaises(errors.OutOfRangeError):
sess.run(iterator.get_next())
if __name__ == "__main__":
test.main()
| apache-2.0 |
Hammer2900/clamor | sessions.py | 1 | 2569 | #############################################################
# The unofficial Sessions addon for Bottle (bottlepy.org) #
# Made by Magnie (magnie.tk) and Ohaider (fallingduck.tk) #
# License: MIT #
#############################################################
#################
# DOCUMENTATION
#################
# Dependencies: Bottle (bottlepy.org)
# Commands:
# sessions.Session()
# This class should be initialized at the start of the server, and will run throughout the server's existence
# Session.start()
# Should be called on each page that uses sessions; it initializes the user's session or, if the user already has an active session, loads that session's data
# Session.set(name, value)
# Set a name and value pair for the user's current session
# Session.get(name)
# Returns the value for the name key, or None if key doesn't exist
#################
# EXAMPLE
#################
# from sessions import Session
# from bottle import route, run, redirect
#
# session = Session()
#
# @route('/')
# def hello():
# global session
# session.start()
# session.set('name', 'World')
# redirect('/hello')
#
# @route('/hello')
# def hello2():
# global session
# session.start()
# if not(session.get('name')):
# redirect('/')
# return 'Hello, ' + session.get('name') + '!'
#################
from random import randint
from time import time
from hashlib import new
from bottle import request, response
class Session(object):
def __init__(self):
self.data = {}
def start(self):
if not(request.get_cookie('PYSESSID')):
sid = new('sha1', str(int(time() * 1000)) + str(randint(0, 4596))).hexdigest()
response.set_cookie('PYSESSID', sid)
sid = new('sha1', sid + request['REMOTE_ADDR']).hexdigest()
self.data[sid] = {}
try:
if not(self.data.has_key(new('sha1', request.get_cookie('PYSESSID') + request['REMOTE_ADDR']).hexdigest())):
self.data[new('sha1', request.get_cookie('PYSESSID') + request['REMOTE_ADDR']).hexdigest()] = {}
except:
pass
def set(self, n, v):
try:
sid = new('sha1', request.get_cookie('PYSESSID') + request['REMOTE_ADDR']).hexdigest()
self.data[sid][n] = v
except:
pass
def get(self, n):
try:
sid = new('sha1', request.get_cookie('PYSESSID') + request['REMOTE_ADDR']).hexdigest()
return self.data[sid][n]
except:
return None
| mit |
mpeuster/son-emu | src/emuvim/api/osm/lcm.py | 1 | 1660 | #!/usr/bin/env python2
# Copyright (c) 2019 Erik Schilling
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from emuvim.api.osm.osm_component_base import OSMComponentBase
from emuvim.api.util.docker_utils import wrap_debian_like
class LCM(OSMComponentBase):
def __init__(self, net, ip, ro_ip, mongo_ip, kafka_ip,
vca_host=os.environ.get('VCA_HOST'),
vca_secret=os.environ.get('VCA_SECRET'),
version='latest',
name_prefix=''):
OSMComponentBase.__init__(self)
self.instance = net.addDocker(
'{}lcm'.format(name_prefix), ip=ip, dimage=wrap_debian_like('opensourcemano/lcm:%s' % version),
volumes=['osm_packages:/app/storage'],
environment={
'OSMLCM_RO_HOST': ro_ip,
'OSMLCM_VCA_HOST': vca_host,
'OSMLCM_VCA_SECRET': vca_secret,
'OSMLCM_DATABASE_URI': 'mongodb://%s:27017' % mongo_ip,
'OSMLCM_MESSAGE_HOST': kafka_ip,
})
def start(self):
OSMComponentBase.start(self)
time.sleep(3)
| apache-2.0 |
jantman/kvmdash | kvmdash/tests/test_util.py | 1 | 2671 | """
Tests for the util module
"""
import pytest
from kvmdash.util import calculate_host_resources
def test_calculate_host_resources():
hosts = {}
hosts['foo'] = {"data": {"maxvcpus": 16, "df_bytes": 12345678, "hostname": "foo", "memory_bytes": 1000}, "type": "host", "name": "foo"}
hosts['bar'] = {"data": {"maxvcpus": 10, "df_bytes": 12345678, "hostname": "bar", "memory_bytes": 100}, "type": "host", "name": "bar"}
hosts['baz'] = {"data": {"maxvcpus": 10, "df_bytes": 12345678, "hostname": "baz", "memory_bytes": 100}, "type": "host", "name": "baz"}
guests = {}
guests['guest1'] = {"data": {"bridges": [{"mac": "52:54:00:a9:93:d5", "model": "virtio"}], "UUID": "786584D0-DD1B-0D0D-8551-63DB7B0D260D", "vcpus": 4, "memory_bytes": 80, "state": "running", "disk_files": ["/var/lib/libvirt/images/guest1-disk0"], "type": "kvm", "ID": 1, "name": "guest1"}, "host": "bar", "type": "guest", "name": "guest1", "uuid": "786584D0-DD1B-0D0D-8551-63DB7B0D260D"}
guests['guest2'] = {"data": {"bridges": [{"mac": "52:54:00:18:ad:18", "model": "virtio"}], "UUID": "86753771-646C-A7BE-737C-4AE0454E01C9", "vcpus": 2, "memory_bytes": 500, "state": "running", "disk_files": ["/var/lib/libvirt/images/guest2-disk0"], "type": "kvm", "ID": 12, "name": "guest2"}, "host": "foo", "type": "guest", "name": "guest2", "uuid": "86753771-646C-A7BE-737C-4AE0454E01C9"}
guests['guest3'] = {"data": {"bridges": [{"mac": "00:16:3e:5f:cc:44", "model": "virtio"}], "UUID": "40357270-17EE-F043-8B9A-4AA6BC3AFDB2", "vcpus": 8, "memory_bytes": 320, "state": "running", "disk_files": ["/var/lib/libvirt/images/guest3-disk0"], "type": "kvm", "ID": 18, "name": "guest3"}, "host": "foo", "type": "guest", "name": "guest3", "uuid": "40357270-17EE-F043-8B9A-4AA6BC3AFDB2"}
desired = {}
desired['foo'] = {"data": {"maxvcpus": 16, "df_bytes": 12345678, "hostname": "foo", "memory_bytes": 1000, 'allocated_vcpus': 10, 'unallocated_vcpus': 6, 'allocated_memory_bytes': 820, 'unallocated_memory_bytes': 180, 'num_guests': 2}, "type": "host", "name": "foo"}
desired['bar'] = {"data": {"maxvcpus": 10, "df_bytes": 12345678, "hostname": "bar", "memory_bytes": 100, 'allocated_vcpus': 4, 'unallocated_vcpus': 6, 'allocated_memory_bytes': 80, 'unallocated_memory_bytes': 20, 'num_guests': 1}, "type": "host", "name": "bar"}
desired['baz'] = {"data": {"maxvcpus": 10, "df_bytes": 12345678, "hostname": "baz", "memory_bytes": 100, 'allocated_vcpus': 0, 'unallocated_vcpus': 10, 'allocated_memory_bytes': 0, 'unallocated_memory_bytes': 100, 'num_guests': 0}, "type": "host", "name": "baz"}
result = calculate_host_resources(hosts, guests)
assert result == desired
| agpl-3.0 |
Brett55/moto | tests/test_polly/test_polly.py | 8 | 7820 | from __future__ import unicode_literals
from botocore.exceptions import ClientError
import boto3
import sure # noqa
from nose.tools import assert_raises
from moto import mock_polly
# Polly only available in a few regions
DEFAULT_REGION = 'eu-west-1'
LEXICON_XML = """<?xml version="1.0" encoding="UTF-8"?>
<lexicon version="1.0"
xmlns="http://www.w3.org/2005/01/pronunciation-lexicon"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xsi:schemaLocation="http://www.w3.org/2005/01/pronunciation-lexicon
http://www.w3.org/TR/2007/CR-pronunciation-lexicon-20071212/pls.xsd"
alphabet="ipa"
xml:lang="en-US">
<lexeme>
<grapheme>W3C</grapheme>
<alias>World Wide Web Consortium</alias>
</lexeme>
</lexicon>"""
@mock_polly
def test_describe_voices():
client = boto3.client('polly', region_name=DEFAULT_REGION)
resp = client.describe_voices()
len(resp['Voices']).should.be.greater_than(1)
resp = client.describe_voices(LanguageCode='en-GB')
len(resp['Voices']).should.equal(3)
try:
client.describe_voices(LanguageCode='SOME_LANGUAGE')
except ClientError as err:
err.response['Error']['Code'].should.equal('400')
else:
raise RuntimeError('Should have raised an exception')
@mock_polly
def test_put_list_lexicon():
client = boto3.client('polly', region_name=DEFAULT_REGION)
# Return nothing
client.put_lexicon(
Name='test',
Content=LEXICON_XML
)
resp = client.list_lexicons()
len(resp['Lexicons']).should.equal(1)
@mock_polly
def test_put_get_lexicon():
client = boto3.client('polly', region_name=DEFAULT_REGION)
# Return nothing
client.put_lexicon(
Name='test',
Content=LEXICON_XML
)
resp = client.get_lexicon(Name='test')
resp.should.contain('Lexicon')
resp.should.contain('LexiconAttributes')
@mock_polly
def test_put_lexicon_bad_name():
client = boto3.client('polly', region_name=DEFAULT_REGION)
try:
client.put_lexicon(
Name='test-invalid',
Content=LEXICON_XML
)
except ClientError as err:
err.response['Error']['Code'].should.equal('InvalidParameterValue')
else:
raise RuntimeError('Should have raised an exception')
@mock_polly
def test_synthesize_speech():
client = boto3.client('polly', region_name=DEFAULT_REGION)
# Return nothing
client.put_lexicon(
Name='test',
Content=LEXICON_XML
)
tests = (
('pcm', 'audio/pcm'),
('mp3', 'audio/mpeg'),
('ogg_vorbis', 'audio/ogg'),
)
for output_format, content_type in tests:
resp = client.synthesize_speech(
LexiconNames=['test'],
OutputFormat=output_format,
SampleRate='16000',
Text='test1234',
TextType='text',
VoiceId='Astrid'
)
resp['ContentType'].should.equal(content_type)
@mock_polly
def test_synthesize_speech_bad_lexicon():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test2'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234',
TextType='text',
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('LexiconNotFoundException')
else:
raise RuntimeError('Should have raised LexiconNotFoundException')
@mock_polly
def test_synthesize_speech_bad_output_format():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='invalid',
SampleRate='16000',
Text='test1234',
TextType='text',
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('InvalidParameterValue')
else:
raise RuntimeError('Should have raised InvalidParameterValue')
@mock_polly
def test_synthesize_speech_bad_sample_rate():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='18000',
Text='test1234',
TextType='text',
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('InvalidSampleRateException')
else:
raise RuntimeError('Should have raised InvalidSampleRateException')
@mock_polly
def test_synthesize_speech_bad_text_type():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234',
TextType='invalid',
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('InvalidParameterValue')
else:
raise RuntimeError('Should have raised InvalidParameterValue')
@mock_polly
def test_synthesize_speech_bad_voice_id():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234',
TextType='text',
VoiceId='Luke'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('InvalidParameterValue')
else:
raise RuntimeError('Should have raised InvalidParameterValue')
@mock_polly
def test_synthesize_speech_text_too_long():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234'*376, # = 3008 characters
TextType='text',
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('TextLengthExceededException')
else:
raise RuntimeError('Should have raised TextLengthExceededException')
@mock_polly
def test_synthesize_speech_bad_speech_marks1():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234',
TextType='text',
SpeechMarkTypes=['word'],
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
else:
raise RuntimeError('Should of raised ')
@mock_polly
def test_synthesize_speech_bad_speech_marks2():
client = boto3.client('polly', region_name=DEFAULT_REGION)
client.put_lexicon(Name='test', Content=LEXICON_XML)
try:
client.synthesize_speech(
LexiconNames=['test'],
OutputFormat='pcm',
SampleRate='16000',
Text='test1234',
TextType='ssml',
SpeechMarkTypes=['word'],
VoiceId='Astrid'
)
except ClientError as err:
err.response['Error']['Code'].should.equal('MarksNotSupportedForFormatException')
else:
raise RuntimeError('Should have raised MarksNotSupportedForFormatException')
| apache-2.0 |
2013Commons/hue | desktop/core/ext-py/guppy-0.1.10/guppy/heapy/Monitor.py | 37 | 12734 | #._cv_part guppy.heapy.Monitor
import os, pprint, signal, socket, SocketServer, sys, threading, time, traceback
import cPickle as pickle
try:
import readline # Imported to _enable_ command line editing
except ImportError:
pass
import select, Queue
from guppy.heapy.RemoteConstants import *
from guppy.heapy.Console import Console
from guppy.sets import mutnodeset
from guppy.etc.etc import ptable
from guppy.etc import cmd
class Server(SocketServer.ThreadingTCPServer):
pass
def ioready(fd, wait):
r, w, x = select.select([fd], [], [], wait)
return len(r)
def queue_get_interruptible(q, noblock=0):
while 1:
try:
return q.get(timeout=0.2)
except Queue.Empty:
if noblock:
break
# Special value signals that connection has been closed
CONN_CLOSED = ('CLOSED',)
class Handler(SocketServer.StreamRequestHandler):
allow_reuse_address = 1
def close(self):
if not self.isclosed.tas(0):
self.server.monitor.remove_connection(self)
self.dataq.put(CONN_CLOSED)
self.request.shutdown(2)
self.request.close()
def send_cmd(self, cmd):
if not cmd.endswith('\n'):
cmd += '\n'
self.request.send(cmd)
def browser_cmd(self, cmd):
if self.prompt == '>>> ':
self.exec_cmd('q', retdata=1)
if self.prompt == '<Annex> ':
self.exec_cmd('cont', retdata=1)
return self.exec_cmd(cmd, retdata=1)
def exec_cmd(self, cmd, retdata=0, noblock=0):
if cmd is not None:
self.send_cmd(cmd)
self.promptstate = False
datas = []
while 1:
p = queue_get_interruptible(self.dataq, noblock)
if p is None:
if self.promptstate:
break
else:
time.sleep(1)
continue
if p is CONN_CLOSED:
raise EOFError
if p[0] == 'DATA':
self.promptstate = False
if retdata:
datas.append(p[1])
else:
sys.stdout.write(p[1])
elif p[0] == 'PROMPT':
self.prompt = p[1]
if self.dataq.empty():
self.promptstate = True
break
else:
self.promptstate = False
else:
assert 0
if retdata:
return ''.join(datas)
def get_ps(self, name):
for line in self.firstdata.split('\n'):
if line.startswith(name):
if '=' in line:
ix = line.index('=')
line = line[ix+1:].strip()
return line
return ''
def get_val(self, expr):
data = self.browser_cmd('dump %s'%expr)
return pickle.loads(data)
def handle(self):
self.prompt = None
self.promptstate = False
self.isclosed = mutnodeset()
self.dataq = Queue.Queue()
self.server.monitor.add_connection(self)
while 1:
try:
data = self.rfile.readline()
if not data:
raise EOFError,'End of file'
if data.endswith(DONE):
raise EOFError,'DONE'
except (EOFError, socket.error):
break
if data.endswith(READLINE):
prompt = data[:-len(READLINE)]
self.dataq.put(('PROMPT',prompt))
if self.prompt is None:
self.firstdata = self.exec_cmd(cmd=None,retdata=1)
else:
self.dataq.put(('DATA',data))
self.close()
class MonitorConnection(cmd.Cmd):
use_raw_input = 1
def __init__(self, monitor):
self.aliases = {}
cmd.Cmd.__init__(self)
self.hno = 0
self.isclosed = 0
self.forceexit = 0
self.prompt = '<Monitor> '
self.monitor = monitor
self.server = s = Server((LOCALHOST, HEAPYPORT), Handler)
self.server.monitor = monitor
self.st = threading.Thread(target = self.run_server,
args = ())
self.st.start()
def close(self):
self.isclosed = 1
self.server.socket.shutdown(2)
self.server.server_close()
self.server.verify_request = lambda x, y: 0
def default(self, line):
cmd.Cmd.default(self, line)
cmd.Cmd.do_help(self, '')
def run_server(self):
s = self.server
while not self.isclosed:
s.handle_request()
s.server_close()
def exec_cmd(self, cmd):
if not cmd:
# Don't repeat the previous command on an empty line (the default
# cmd.py behaviour); it turned out to be confusing sometimes.
return
line = cmd
try:
line = self.precmd(line)
stop = self.onecmd(line)
stop = self.postcmd(stop, line)
return stop
except:
self.handle_error(line)
def handle_error(self, cmdline):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
print >>sys.stderr,'-'*40
print >>sys.stderr,'Exception happened while processing the command',
print >>sys.stderr,repr(cmdline)
import traceback
traceback.print_exc()
print >>sys.stderr, '-'*40
# Alias handling etc copied from pdb.py in Python dist
def precmd(self, line):
"""Handle alias expansion and ';;' separator."""
self.curline = line
if not line:
return line
args = line.split()
while self.aliases.has_key(args[0]):
line = self.aliases[args[0]]
if '%' in line:
ii = 1
for tmpArg in args[1:]:
line = line.replace("%" + str(ii),
tmpArg)
line = line.replace('%>=' + str(ii),
' '.join(args[ii:]))
ii = ii + 1
line = line.replace("%*", ' '.join(args[1:]))
else:
line = line + ' ' + ' '.join(args[1:])
args = line.split()
# split into ';;' separated commands
# unless it's an alias command
if args[0] != 'alias':
marker = line.find(';;')
if marker >= 0:
# queue up everything after marker
next = line[marker+2:].lstrip()
self.cmdqueue.append(next)
line = line[:marker].rstrip()
return line
def do_exit(self, arg):
self.forceexit = 1
return 1
def help_exit(self):
print """exit
-----
Exit from the monitor and from the Python process that started it.
This makes sure to exit without waiting for the server thread to terminate.
See also the q command."""
do_h = cmd.Cmd.do_help
def help_h(self):
print """h(elp)
-----
Without argument, print the list of available commands.
With a command name as argument, print help about that command."""
def help_help(self):
self.help_h()
def do_int(self, arg):
try:
con = Console(stdin=self.stdin,stdout=self.stdout,
locals=self.__dict__)
con.interact(
"Local interactive console. To return to Monitor, type %r."%
con.EOF_key_sequence)
finally:
pass
def help_int(self):
print """int
-----
Local interactive console.
This will bring up a Python console locally, in
the same interpreter process as the Monitor itself.
def do_ki(self, arg):
if not arg:
arg = self.conid
arg = int(arg)
c = self.monitor.connections[arg]
if c.get_ps('noninterruptible'):
print '''\
Error: Can not interrupt this remote connection (uses Python < 2.4)'''
else:
print 'Sending KeyboardInterrupt to connection %s.'%arg
c.send_cmd(KEYBOARDINTERRUPT)
def help_ki(self):
print """ki <connection ID>
-----
Keyboard Interrupt
Send a command to interrupt the remote thread on the specified
connection (default is the last one connected to).
Notes:
It currently only works with Python >= 2.4. The remote thread will
not always be awakened, for example if it is waiting in
time.sleep(). Sometimes using several ki commands helps."""
def do_lc(self, arg):
table = [['CID', 'PID', 'ARGV']]
for cid, con in self.monitor.connections.items():
table.append([cid,
con.get_ps('target.pid'),
con.get_ps('target.sys.argv')])
ptable(table, self.stdout)
def help_lc(self):
print """lc
-----
List Connections.
List the currently open connections.
The table printed has one line per connection in this form:
CID PID ARGV
1 17999 ['/home/nilsson/bin/solitaire.py']
CID is the connection ID, which may be used as argument to the sc
command.
PID is the process ID of the target interpreter process. In Linux,
this is the parent of the remote control interpreter thread that runs
the Annex that the connection is talking to.
ARGV is the argument vector in the target Python interpreter."""
def do_sc(self, arg):
if arg:
self.conid = int(arg)
print 'Remote connection %d. To return to Monitor, type <Ctrl-C> or .<RETURN>'%self.conid
self.monitor.set_connection(self.monitor.connections[self.conid])
def help_sc(self):
print """sc <connection ID>
-----
Set connection to communicate with a remote thread.
With an argument, set current connection to the number specified.
Without argument, use the same connection as last time. You will then
be talking to a remote process via that connection. You can return to
Monitor at any time by <Ctrl-C>. You may also use the '.' command
(followed by <Return>), if the remote process is waiting for input.
The '.' character may be followed by a monitor command, to execute it
directly instead of returning to the monitor. For example, when
talking to a connection, '.sc 1' will directly change to connection 1."""
def do_q(self, arg):
return 1
def help_q(self):
print """q
-----
Quit from the monitor.
This will not exit from Python itself if called from an interactive
interpreter. To make sure to exit from Python, use the exit command."""
class Monitor:
use_raw_input = 1
def __init__(self):
self.connection = self.monitor_connection = MonitorConnection(self)
self.connections = {}
self.ids = 0
self.prompt = None
def newid(self):
if not self.connections:
self.ids = 1
self.monitor_connection.conid = self.ids
else:
self.ids = max([1]+[c for c in self.connections.keys()])+1
return self.ids
def add_connection(self, connection):
hid = self.newid()
self.connections[hid] = connection
connection.monitor_id = hid
self.print_async( '*** Connection %s opened ***'%hid)
def print_async(self, text):
""" Print text only if we are waiting for input,
and then restore the prompt. """
if self.prompt is not None:
print '\n'+text
sys.stdout.write(self.prompt)
sys.stdout.flush()
def remove_connection(self, connection):
del self.connections[connection.monitor_id]
if connection is self.connection:
self.set_connection(self.monitor_connection)
self.print_async( '*** Connection %s closed ***'%connection.monitor_id)
def run(self):
try:
stop = 0
while not stop:
try:
while not stop:
conn = self.connection
self.prompt = conn.prompt
if conn is not self.monitor_connection:
conn.exec_cmd(cmd=None,noblock=1)
cmd = raw_input(conn.prompt)
self.prompt = None
conn = None
if cmd.startswith('.'):
if cmd == '.':
self.connection = self.monitor_connection
else:
cmd = cmd[1:]
conn = self.monitor_connection
#elif cmd or self.connection is self.monitor_connection:
else:
conn = self.connection
if conn:
try:
r = conn.exec_cmd(cmd)
except EOFError:
r = 1
if conn is self.monitor_connection and r:
stop = 1
#print 'to stop'
#print 'end of loop'
except EOFError:
                    # We had better exit in case the input is from a file.
#print 'EOFError'
#print 'Use the monitor q command to quit.'
print '*** End Of File - Exiting Monitor ***'
self.connection = self.monitor_connection
stop = 1
except KeyboardInterrupt:
print 'KeyboardInterrupt'
print 'Use the ki command to interrupt a remote process.'
self.connection = self.monitor_connection
continue
finally:
self.prompt=None # Avoid closing messages
#print 'to close'
self.close()
def close(self):
for c in self.connections.values():
try:
#print 'to close:', c
c.close()
except socket.error:
pass
try:
#print 'to close: self'
self.monitor_connection.close()
except socket.error:
pass
if self.monitor_connection.forceexit:
os._exit(0)
def set_connection(self, connection):
self.connection = connection
self.prompt = connection.prompt
def monitor():
"""monitor() [0]
Start an interactive remote monitor.
This can be used to get information about the state, in
particular the memory usage, of separately running Python
processes.
References
[0] heapy_Use.html#heapykinds.Use.monitor"""
from guppy.heapy import Remote
Remote.off()
m = Monitor()
m.run()
if __name__ == '__main__':
monitor()
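# A minimal usage sketch, assuming guppy is installed and the target process
# has enabled the remote control thread (for example via
# "import guppy.heapy.RM"); this is an illustration only:
#
#     from guppy import hpy
#     hpy().monitor()      # starts the interactive Monitor defined above
#
# Inside the monitor, "lc" lists connections, "sc <id>" switches to one, and
# "ki <id>" sends a KeyboardInterrupt to the remote thread.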
| apache-2.0 |
hayderimran7/tempest | tempest/api/image/v1/test_images.py | 8 | 12353 | # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import moves
from tempest.api.image import base
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
CONF = config.CONF
class CreateRegisterImagesTest(base.BaseV1ImageTest):
"""Here we test the registration and creation of images."""
@test.idempotent_id('3027f8e6-3492-4a11-8575-c3293017af4d')
def test_register_then_upload(self):
# Register, then upload an image
properties = {'prop1': 'val1'}
body = self.create_image(name='New Name',
container_format='bare',
disk_format='raw',
is_public=False,
properties=properties)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Name', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
# Now try uploading an image file
image_file = moves.cStringIO(data_utils.random_bytes())
body = self.client.update_image(image_id, data=image_file)
self.assertIn('size', body)
self.assertEqual(1024, body.get('size'))
@test.idempotent_id('69da74d9-68a9-404b-9664-ff7164ccb0f5')
def test_register_remote_image(self):
# Register a new remote image
body = self.create_image(name='New Remote Image',
container_format='bare',
disk_format='raw', is_public=False,
location=CONF.image.http_image,
properties={'key1': 'value1',
'key2': 'value2'})
self.assertIn('id', body)
self.assertEqual('New Remote Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('active', body.get('status'))
properties = body.get('properties')
self.assertEqual(properties['key1'], 'value1')
self.assertEqual(properties['key2'], 'value2')
@test.idempotent_id('6d0e13a7-515b-460c-b91f-9f4793f09816')
def test_register_http_image(self):
body = self.create_image(name='New Http Image',
container_format='bare',
disk_format='raw', is_public=False,
copy_from=CONF.image.http_image)
self.assertIn('id', body)
image_id = body.get('id')
self.assertEqual('New Http Image', body.get('name'))
self.assertFalse(body.get('is_public'))
self.client.wait_for_image_status(image_id, 'active')
self.client.show_image(image_id)
@test.idempotent_id('05b19d55-140c-40d0-b36b-fafd774d421b')
def test_register_image_with_min_ram(self):
# Register an image with min ram
properties = {'prop1': 'val1'}
body = self.create_image(name='New_image_with_min_ram',
container_format='bare',
disk_format='raw',
is_public=False,
min_ram=40,
properties=properties)
self.assertIn('id', body)
self.assertEqual('New_image_with_min_ram', body.get('name'))
self.assertFalse(body.get('is_public'))
self.assertEqual('queued', body.get('status'))
self.assertEqual(40, body.get('min_ram'))
for key, val in properties.items():
self.assertEqual(val, body.get('properties')[key])
self.client.delete_image(body['id'])
class ListImagesTest(base.BaseV1ImageTest):
"""
Here we test the listing of image information
"""
@classmethod
def resource_setup(cls):
super(ListImagesTest, cls).resource_setup()
# We add a few images here to test the listing functionality of
# the images API
img1 = cls._create_remote_image('one', 'bare', 'raw')
img2 = cls._create_remote_image('two', 'ami', 'ami')
img3 = cls._create_remote_image('dup', 'bare', 'raw')
img4 = cls._create_remote_image('dup', 'bare', 'raw')
img5 = cls._create_standard_image('1', 'ami', 'ami', 42)
img6 = cls._create_standard_image('2', 'ami', 'ami', 142)
img7 = cls._create_standard_image('33', 'bare', 'raw', 142)
img8 = cls._create_standard_image('33', 'bare', 'raw', 142)
cls.created_set = set(cls.created_images)
        # 4x remote images, 4x standard images
cls.remote_set = set((img1, img2, img3, img4))
cls.standard_set = set((img5, img6, img7, img8))
# 5x bare, 3x ami
cls.bare_set = set((img1, img3, img4, img7, img8))
cls.ami_set = set((img2, img5, img6))
# 1x with size 42
cls.size42_set = set((img5,))
# 3x with size 142
cls.size142_set = set((img6, img7, img8,))
# dup named
cls.dup_set = set((img3, img4))
@classmethod
def _create_remote_image(cls, name, container_format, disk_format):
"""
Create a new remote image and return the ID of the newly-registered
image
"""
name = 'New Remote Image %s' % name
location = CONF.image.http_image
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False,
location=location)
image_id = image['id']
return image_id
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
        image. The image data consists of 'size' random bytes, so the
        registered image has exactly the requested size.
"""
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file)
image_id = image['id']
return image_id
@test.idempotent_id('246178ab-3b33-4212-9a4b-a7fe8261794d')
def test_index_no_params(self):
# Simple test to see all fixture images returned
images_list = self.client.list_images()
image_list = map(lambda x: x['id'], images_list)
for image_id in self.created_images:
self.assertIn(image_id, image_list)
@test.idempotent_id('f1755589-63d6-4468-b098-589820eb4031')
def test_index_disk_format(self):
images_list = self.client.list_images(disk_format='ami')
for image in images_list:
self.assertEqual(image['disk_format'], 'ami')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.ami_set <= result_set)
self.assertFalse(self.created_set - self.ami_set <= result_set)
@test.idempotent_id('2143655d-96d9-4bec-9188-8674206b4b3b')
def test_index_container_format(self):
images_list = self.client.list_images(container_format='bare')
for image in images_list:
self.assertEqual(image['container_format'], 'bare')
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.bare_set <= result_set)
self.assertFalse(self.created_set - self.bare_set <= result_set)
@test.idempotent_id('feb32ac6-22bb-4a16-afd8-9454bb714b14')
def test_index_max_size(self):
images_list = self.client.list_images(size_max=42)
for image in images_list:
self.assertTrue(image['size'] <= 42)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size42_set <= result_set)
self.assertFalse(self.created_set - self.size42_set <= result_set)
@test.idempotent_id('6ffc16d0-4cbf-4401-95c8-4ac63eac34d8')
def test_index_min_size(self):
images_list = self.client.list_images(size_min=142)
for image in images_list:
self.assertTrue(image['size'] >= 142)
result_set = set(map(lambda x: x['id'], images_list))
self.assertTrue(self.size142_set <= result_set)
self.assertFalse(self.size42_set <= result_set)
@test.idempotent_id('e5dc26d9-9aa2-48dd-bda5-748e1445da98')
def test_index_status_active_detail(self):
images_list = self.client.list_images(detail=True,
status='active',
sort_key='size',
sort_dir='desc')
top_size = images_list[0]['size'] # We have non-zero sized images
for image in images_list:
size = image['size']
self.assertTrue(size <= top_size)
top_size = size
self.assertEqual(image['status'], 'active')
@test.idempotent_id('097af10a-bae8-4342-bff4-edf89969ed2a')
def test_index_name(self):
images_list = self.client.list_images(
detail=True,
name='New Remote Image dup')
result_set = set(map(lambda x: x['id'], images_list))
for image in images_list:
self.assertEqual(image['name'], 'New Remote Image dup')
self.assertTrue(self.dup_set <= result_set)
self.assertFalse(self.created_set - self.dup_set <= result_set)
class UpdateImageMetaTest(base.BaseV1ImageTest):
@classmethod
def resource_setup(cls):
super(UpdateImageMetaTest, cls).resource_setup()
cls.image_id = cls._create_standard_image('1', 'ami', 'ami', 42)
@classmethod
def _create_standard_image(cls, name, container_format,
disk_format, size):
"""
Create a new standard image and return the ID of the newly-registered
image.
"""
image_file = moves.cStringIO(data_utils.random_bytes(size))
name = 'New Standard Image %s' % name
image = cls.create_image(name=name,
container_format=container_format,
disk_format=disk_format,
is_public=False, data=image_file,
properties={'key1': 'value1'})
image_id = image['id']
return image_id
@test.idempotent_id('01752c1c-0275-4de3-9e5b-876e44541928')
def test_list_image_metadata(self):
# All metadata key/value pairs for an image should be returned
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'value1'}
self.assertEqual(expected, resp_metadata['properties'])
@test.idempotent_id('d6d7649c-08ce-440d-9ea7-e3dda552f33c')
def test_update_image_metadata(self):
# The metadata for the image should match the updated values
req_metadata = {'key1': 'alt1', 'key2': 'value2'}
metadata = self.client.get_image_meta(self.image_id)
self.assertEqual(metadata['properties'], {'key1': 'value1'})
metadata['properties'].update(req_metadata)
metadata = self.client.update_image(
self.image_id, properties=metadata['properties'])
resp_metadata = self.client.get_image_meta(self.image_id)
expected = {'key1': 'alt1', 'key2': 'value2'}
self.assertEqual(expected, resp_metadata['properties'])
| apache-2.0 |
xifle/home-assistant | homeassistant/components/notify/instapush.py | 11 | 2966 | """
Instapush notification service.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/notify.instapush/
"""
import json
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.notify import (
ATTR_TITLE, ATTR_TITLE_DEFAULT, PLATFORM_SCHEMA, BaseNotificationService)
from homeassistant.const import CONF_API_KEY
_LOGGER = logging.getLogger(__name__)
_RESOURCE = 'https://api.instapush.im/v1/'
CONF_APP_SECRET = 'app_secret'
CONF_EVENT = 'event'
CONF_TRACKER = 'tracker'
DEFAULT_TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_APP_SECRET): cv.string,
vol.Required(CONF_EVENT): cv.string,
vol.Required(CONF_TRACKER): cv.string,
})
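# Illustrative configuration sketch (placeholder values, not taken from the
# original file); the schema above accepts an entry like this in
# configuration.yaml:
#
#     notify:
#       - platform: instapush
#         api_key: YOUR_APP_ID
#         app_secret: YOUR_APP_SECRET
#         event: YOUR_EVENT
#         tracker: YOUR_TRACKER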
def get_service(hass, config):
"""Get the Instapush notification service."""
headers = {'x-instapush-appid': config[CONF_API_KEY],
'x-instapush-appsecret': config[CONF_APP_SECRET]}
try:
response = requests.get(
'{}{}'.format(_RESOURCE, 'events/list'), headers=headers,
timeout=DEFAULT_TIMEOUT).json()
except ValueError:
_LOGGER.error('Unexpected answer from Instapush API.')
return None
if 'error' in response:
_LOGGER.error(response['msg'])
return None
if len([app for app in response
if app['title'] == config[CONF_EVENT]]) == 0:
_LOGGER.error("No app match your given value. "
"Please create an app at https://instapush.im")
return None
return InstapushNotificationService(
config.get(CONF_API_KEY), config.get(CONF_APP_SECRET),
config.get(CONF_EVENT), config.get(CONF_TRACKER))
class InstapushNotificationService(BaseNotificationService):
"""Implementation of the notification service for Instapush."""
def __init__(self, api_key, app_secret, event, tracker):
"""Initialize the service."""
self._api_key = api_key
self._app_secret = app_secret
self._event = event
self._tracker = tracker
self._headers = {
'x-instapush-appid': self._api_key,
'x-instapush-appsecret': self._app_secret,
'Content-Type': 'application/json'}
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
title = kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT)
data = {
'event': self._event,
'trackers': {self._tracker: title + ' : ' + message}
}
response = requests.post(
'{}{}'.format(_RESOURCE, 'post'), data=json.dumps(data),
headers=self._headers, timeout=DEFAULT_TIMEOUT)
if response.json()['status'] == 401:
_LOGGER.error(response.json()['msg'],
"Please check your Instapush settings")
| mit |
rafalkowalski/open-event-orga-server | tests/unittests/test_db_performance.py | 5 | 3907 | import time
import unittest
from flask import url_for
from flask.ext.sqlalchemy import get_debug_queries
from werkzeug.contrib.profiler import ProfilerMiddleware
from app import current_app as app
from app.models import db
from config import ProductionConfig
from populate_db import populate
from tests.unittests.object_mother import ObjectMother
from tests.unittests.setup_database import Setup
from tests.unittests.views.view_test_case import OpenEventViewTestCase
class TestEvents(OpenEventViewTestCase):
def setUp(self):
self.app = Setup.create_app()
app.config['TESTING'] = True
app.secret_key = 'super secret key'
app.config['PROFILE'] = True
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])
with app.test_request_context():
db.create_all()
populate()
def test_db_events(self):
with app.test_request_context():
for i in range(1, 10000):
event = ObjectMother.get_event()
event.name = 'Event' + str(i)
db.session.add(event)
db.session.commit()
url = url_for('sadmin_events.index_view')
self.app.get(url, follow_redirects=True)
with open("output_events.txt", "w") as text_file:
for query in get_debug_queries():
if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
text_file.write("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
query.statement, query.parameters, query.duration, query.context))
text_file.write("\n")
for query in get_debug_queries():
if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
app.logger.warning("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
query.statement, query.parameters, query.duration, query.context))
def test_db_sessions(self):
with app.test_request_context():
# create event
event = ObjectMother.get_event()
db.session.add(event)
db.session.commit()
# test
for i in range(1, 10000):
session = ObjectMother.get_session()
session.name = 'Session' + str(i)
db.session.add(session)
db.session.commit()
url = url_for('sadmin_sessions.display_my_sessions_view')
time.clock()
self.app.get(url, follow_redirects=True)
with open("output_session.txt", "w") as text_file:
for query in get_debug_queries():
if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
text_file.write("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
query.statement, query.parameters, query.duration, query.context))
text_file.write("\n")
def test_db_users(self):
with app.test_request_context():
for i in range(1, 10000):
user = ObjectMother.get_user()
user.email = 'User' + str(i)
db.session.add(user)
db.session.commit()
url = url_for('sadmin_users.index_view')
self.app.get(url, follow_redirects=True)
with open("output_users.txt", "w") as text_file:
for query in get_debug_queries():
if query.duration >= ProductionConfig.DATABASE_QUERY_TIMEOUT:
text_file.write("SLOW QUERY: %s\nParameters: %s\nDuration: %fs\nContext: %s\n" % (
query.statement, query.parameters, query.duration, query.context))
text_file.write("\n")
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ecosoft-odoo/odoo | addons/pos_restaurant/restaurant.py | 325 | 2246 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import logging
import openerp
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
class restaurant_printer(osv.osv):
_name = 'restaurant.printer'
_columns = {
'name' : fields.char('Printer Name', size=32, required=True, help='An internal identification of the printer'),
'proxy_ip': fields.char('Proxy IP Address', size=32, help="The IP Address or hostname of the Printer's hardware proxy"),
'product_categories_ids': fields.many2many('pos.category','printer_category_rel', 'printer_id','category_id',string='Printed Product Categories'),
}
_defaults = {
'name' : 'Printer',
}
class pos_config(osv.osv):
_inherit = 'pos.config'
_columns = {
'iface_splitbill': fields.boolean('Bill Splitting', help='Enables Bill Splitting in the Point of Sale'),
'iface_printbill': fields.boolean('Bill Printing', help='Allows to print the Bill before payment'),
'printer_ids': fields.many2many('restaurant.printer','pos_config_printer_rel', 'config_id','printer_id',string='Order Printers'),
}
_defaults = {
'iface_splitbill': False,
'iface_printbill': False,
}
| agpl-3.0 |
IndonesiaX/edx-platform | lms/djangoapps/verify_student/tests/fake_software_secure.py | 73 | 1697 | """
Fake Software Secure page for use in acceptance tests.
"""
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.generic.base import View
from edxmako.shortcuts import render_to_response
from lms.djangoapps.verify_student.models import SoftwareSecurePhotoVerification
class SoftwareSecureFakeView(View):
"""
Fake SoftwareSecure view for testing different photo verification statuses
and email functionality.
"""
@method_decorator(login_required)
def get(self, request):
"""
Render a fake Software Secure page that will pick the most recent
attempt for a given user and pass it to the html page.
"""
context_dict = self.response_post_params(request.user)
return render_to_response("verify_student/test/fake_softwaresecure_response.html", context_dict)
@classmethod
def response_post_params(cls, user):
"""
Calculate the POST params we want to send back to the client.
"""
access_key = settings.VERIFY_STUDENT["SOFTWARE_SECURE"]["API_ACCESS_KEY"]
context = {
'receipt_id': None,
'authorization_code': 'SIS {}:0000'.format(access_key),
'results_callback': reverse('verify_student_results_callback')
}
try:
most_recent = SoftwareSecurePhotoVerification.objects.filter(user=user).order_by("-updated_at")[0]
context["receipt_id"] = most_recent.receipt_id
except: # pylint: disable=bare-except
pass
return context
| agpl-3.0 |
xadahiya/django | django/db/models/query.py | 25 | 67826 | """
The main QuerySet implementation. This provides the public API for the ORM.
"""
import copy
import sys
import warnings
from collections import OrderedDict, deque
from django.conf import settings
from django.core import exceptions
from django.db import (
DJANGO_VERSION_PICKLE_KEY, IntegrityError, connections, router,
transaction,
)
from django.db.models import sql
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import Collector
from django.db.models.expressions import F, Date, DateTime
from django.db.models.fields import AutoField
from django.db.models.query_utils import (
Q, InvalidQuery, check_rel_lookup_compatibility, deferred_class_factory,
)
from django.db.models.sql.constants import CURSOR
from django.utils import six, timezone
from django.utils.functional import partition
from django.utils.version import get_version
# The maximum number of items to display in a QuerySet.__repr__
REPR_OUTPUT_SIZE = 20
# Pull into this namespace for backwards compatibility.
EmptyResultSet = sql.EmptyResultSet
class BaseIterator(object):
def __init__(self, queryset):
self.queryset = queryset
class ModelIterator(BaseIterator):
"""
Iterator that yields a model instance for each row.
"""
def __iter__(self):
queryset = self.queryset
db = queryset.db
compiler = queryset.query.get_compiler(using=db)
# Execute the query. This will also fill compiler.select, klass_info,
# and annotations.
results = compiler.execute_sql()
select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
compiler.annotation_col_map)
if klass_info is None:
return
model_cls = klass_info['model']
select_fields = klass_info['select_fields']
model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
init_list = [f[0].target.attname
for f in select[model_fields_start:model_fields_end]]
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set]
model_cls = deferred_class_factory(model_cls, skip)
related_populators = get_related_populators(klass_info, select, db)
for row in compiler.results_iter(results):
obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
if related_populators:
for rel_populator in related_populators:
rel_populator.populate(row, obj)
if annotation_col_map:
for attr_name, col_pos in annotation_col_map.items():
setattr(obj, attr_name, row[col_pos])
# Add the known related objects to the model, if there are any
if queryset._known_related_objects:
for field, rel_objs in queryset._known_related_objects.items():
# Avoid overwriting objects loaded e.g. by select_related
if hasattr(obj, field.get_cache_name()):
continue
pk = getattr(obj, field.get_attname())
try:
rel_obj = rel_objs[pk]
except KeyError:
pass # may happen in qs1 | qs2 scenarios
else:
setattr(obj, field.name, rel_obj)
yield obj
class ValuesIterator(BaseIterator):
"""
Iterator returned by QuerySet.values() that yields a dict
for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
for row in compiler.results_iter():
yield dict(zip(names, row))
class ValuesListIterator(BaseIterator):
"""
Iterator returned by QuerySet.values_lists(flat=False)
that yields a tuple for each row.
"""
def __iter__(self):
queryset = self.queryset
query = queryset.query
compiler = query.get_compiler(queryset.db)
if not query.extra_select and not query.annotation_select:
for row in compiler.results_iter():
yield tuple(row)
else:
field_names = list(query.values_select)
extra_names = list(query.extra_select)
annotation_names = list(query.annotation_select)
# extra(select=...) cols are always at the start of the row.
names = extra_names + field_names + annotation_names
if queryset._fields:
# Reorder according to fields.
fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
else:
fields = names
for row in compiler.results_iter():
data = dict(zip(names, row))
yield tuple(data[f] for f in fields)
class FlatValuesListIterator(BaseIterator):
"""
Iterator returned by QuerySet.values_lists(flat=True) that
yields single values.
"""
def __iter__(self):
queryset = self.queryset
compiler = queryset.query.get_compiler(queryset.db)
for row in compiler.results_iter():
yield row[0]
class QuerySet(object):
"""
Represents a lazy database lookup for a set of objects.
"""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.Query(self.model)
self._result_cache = None
self._sticky_filter = False
self._for_write = False
self._prefetch_related_lookups = []
self._prefetch_done = False
self._known_related_objects = {} # {rel_field, {pk: rel_obj}}
self._iterator_class = ModelIterator
self._fields = None
def as_manager(cls):
# Address the circular dependency between `Queryset` and `Manager`.
from django.db.models.manager import Manager
manager = Manager.from_queryset(cls)()
manager._built_with_as_manager = True
return manager
as_manager.queryset_only = True
as_manager = classmethod(as_manager)
########################
# PYTHON MAGIC METHODS #
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
obj.__dict__[k] = None
else:
obj.__dict__[k] = copy.deepcopy(v, memo)
return obj
def __getstate__(self):
"""
Allows the QuerySet to be pickled.
"""
# Force the cache to be fully populated.
self._fetch_all()
obj_dict = self.__dict__.copy()
obj_dict[DJANGO_VERSION_PICKLE_KEY] = get_version()
return obj_dict
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled queryset instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled queryset instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def __repr__(self):
data = list(self[:REPR_OUTPUT_SIZE + 1])
if len(data) > REPR_OUTPUT_SIZE:
data[-1] = "...(remaining elements truncated)..."
return repr(data)
def __len__(self):
self._fetch_all()
return len(self._result_cache)
def __iter__(self):
"""
The queryset iterator protocol uses three nested iterators in the
default case:
1. sql.compiler:execute_sql()
               - Returns 100 rows at a time (constants.GET_ITERATOR_CHUNK_SIZE)
using cursor.fetchmany(). This part is responsible for
doing some column masking, and returning the rows in chunks.
2. sql/compiler.results_iter()
               - Returns one row at a time. At this point the rows are still just
tuples. In some cases the return values are converted to
Python values at this location.
3. self.iterator()
- Responsible for turning the rows into model objects.
"""
self._fetch_all()
return iter(self._result_cache)
def __bool__(self):
self._fetch_all()
return bool(self._result_cache)
def __nonzero__(self): # Python 2 compatibility
return type(self).__bool__(self)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
if not isinstance(k, (slice,) + six.integer_types):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
(isinstance(k, slice) and (k.start is None or k.start >= 0) and
(k.stop is None or k.stop >= 0))), \
"Negative indexing is not supported."
if self._result_cache is not None:
return self._result_cache[k]
if isinstance(k, slice):
qs = self._clone()
if k.start is not None:
start = int(k.start)
else:
start = None
if k.stop is not None:
stop = int(k.stop)
else:
stop = None
qs.query.set_limits(start, stop)
return list(qs)[::k.step] if k.step else qs
qs = self._clone()
qs.query.set_limits(k, k + 1)
return list(qs)[0]
def __and__(self, other):
self._merge_sanity_check(other)
if isinstance(other, EmptyQuerySet):
return other
if isinstance(self, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.AND)
return combined
def __or__(self, other):
self._merge_sanity_check(other)
if isinstance(self, EmptyQuerySet):
return other
if isinstance(other, EmptyQuerySet):
return self
combined = self._clone()
combined._merge_known_related_objects(other)
combined.query.combine(other.query, sql.OR)
return combined
####################################
# METHODS THAT DO DATABASE QUERIES #
####################################
def iterator(self):
"""
An iterator over the results from applying this QuerySet to the
database.
"""
return self._iterator_class(self)
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
"""
if self.query.distinct_fields:
raise NotImplementedError("aggregate() + distinct(fields) not implemented.")
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
arg.default_alias
except (AttributeError, TypeError):
raise TypeError("Complex aggregates require an alias")
kwargs[arg.default_alias] = arg
query = self.query.clone()
for (alias, aggregate_expr) in kwargs.items():
query.add_annotation(aggregate_expr, alias, is_summary=True)
if not query.annotations[alias].contains_aggregate:
raise TypeError("%s is not an aggregate expression" % alias)
return query.get_aggregation(self.db, kwargs.keys())
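    # Illustrative sketch (hypothetical "Book" model):
    #
    #     from django.db.models import Avg
    #     Book.objects.aggregate(Avg('price'))   # -> {'price__avg': ...}
    #
    # A positional aggregate is keyed by its default alias, as implemented
    # above; passing a keyword argument gives it an explicit name instead.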
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
return self.query.get_count(using=self.db)
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
if self.query.can_filter() and not self.query.distinct_fields:
clone = clone.order_by()
num = len(clone)
if num == 1:
return clone._result_cache[0]
if not num:
raise self.model.DoesNotExist(
"%s matching query does not exist." %
self.model._meta.object_name
)
raise self.model.MultipleObjectsReturned(
"get() returned more than one %s -- it returned %s!" %
(self.model._meta.object_name, num)
)
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
self._for_write = True
obj.save(force_insert=True, using=self.db)
return obj
def _populate_pk_values(self, objs):
for obj in objs:
if obj.pk is None:
obj.pk = obj._meta.pk.get_pk_value_on_save(obj)
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
autoincrement field. Multi-table models are not supported.
"""
# So this case is fun. When you bulk insert you don't get the primary
# keys back (if it's an autoincrement), so you can't insert into the
# child tables which references this. There are two workarounds, 1)
# this could be implemented if you didn't have an autoincrement pk,
# and 2) you could do it by doing O(n) normal inserts into the parent
# tables to get the primary keys back, and then doing a single bulk
# insert into the childmost table. Some databases might allow doing
# this by using RETURNING clause for the insert query. We're punting
# on these for now because they are relatively rare cases.
assert batch_size is None or batch_size > 0
        # Check that the parents share the same concrete model with our
# model to detect the inheritance pattern ConcreteGrandParent ->
# MultiTableParent -> ProxyChild. Simply checking self.model._meta.proxy
# would not identify that case as involving multiple tables.
for parent in self.model._meta.get_parent_list():
if parent._meta.concrete_model is not self.model._meta.concrete_model:
raise ValueError("Can't bulk create a multi-table inherited model")
if not objs:
return objs
self._for_write = True
connection = connections[self.db]
fields = self.model._meta.concrete_fields
objs = list(objs)
self._populate_pk_values(objs)
with transaction.atomic(using=self.db, savepoint=False):
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size)
else:
objs_with_pk, objs_without_pk = partition(lambda o: o.pk is None, objs)
if objs_with_pk:
self._batched_insert(objs_with_pk, fields, batch_size)
if objs_without_pk:
fields = [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size)
return objs
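    # Illustrative sketch (hypothetical "Entry" model):
    #
    #     Entry.objects.bulk_create(
    #         [Entry(headline='First'), Entry(headline='Second')],
    #         batch_size=100)
    #
    # As the docstring notes, save() is not called and no pre/post save
    # signals are sent for the inserted rows.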
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
return self._create_object_from_params(lookup, params)
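    # Illustrative sketch (hypothetical "Author" model):
    #
    #     author, created = Author.objects.get_or_create(
    #         name='Ada', defaults={'email': 'ada@example.com'})
    #
    # "created" is True only when no matching row existed and a new one was
    # built from the lookup kwargs plus "defaults".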
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
lookup, params = self._extract_model_params(defaults, **kwargs)
self._for_write = True
try:
obj = self.get(**lookup)
except self.model.DoesNotExist:
obj, created = self._create_object_from_params(lookup, params)
if created:
return obj, created
for k, v in six.iteritems(defaults):
setattr(obj, k, v)
with transaction.atomic(using=self.db, savepoint=False):
obj.save(using=self.db)
return obj, False
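    # Illustrative sketch (hypothetical "Author" model):
    #
    #     author, created = Author.objects.update_or_create(
    #         name='Ada', defaults={'email': 'new@example.com'})
    #
    # The object is looked up by the keyword arguments; "defaults" are then
    # applied and saved, or used to create the object if none exists.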
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
"""
try:
with transaction.atomic(using=self.db):
obj = self.create(**params)
return obj, True
except IntegrityError:
exc_info = sys.exc_info()
try:
return self.get(**lookup), False
except self.model.DoesNotExist:
pass
six.reraise(*exc_info)
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
"""
defaults = defaults or {}
lookup = kwargs.copy()
for f in self.model._meta.fields:
if f.attname in lookup:
lookup[f.name] = lookup.pop(f.attname)
params = {k: v for k, v in kwargs.items() if LOOKUP_SEP not in k}
params.update(defaults)
return lookup, params
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
'get_latest_by' option or optional given field_name.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
assert bool(order_by), "earliest() and latest() require either a "\
"field_name parameter or 'get_latest_by' in the model"
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken."
obj = self._clone()
obj.query.set_limits(high=1)
obj.query.clear_ordering(force_empty=True)
obj.query.add_ordering('%s%s' % (direction, order_by))
return obj.get()
def earliest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="")
def latest(self, field_name=None):
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
return None
def in_bulk(self, id_list):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
if not id_list:
return {}
qs = self.filter(pk__in=id_list).order_by()
return {obj._get_pk_val(): obj for obj in qs}
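    # Illustrative sketch: SomeModel.objects.in_bulk([1, 2, 3]) returns a
    # dict such as {1: <obj pk=1>, 3: <obj pk=3>} ("SomeModel" is
    # hypothetical; IDs with no matching row are simply absent).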
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
if self._fields is not None:
raise TypeError("Cannot call delete() after .values() or .values_list()")
del_query = self._clone()
# The delete is actually 2 queries - one to find related objects,
# and one to delete. Make sure that the discovery of related
# objects is performed on the same database as the deletion.
del_query._for_write = True
# Disable non-supported fields.
del_query.query.select_for_update = False
del_query.query.select_related = False
del_query.query.clear_ordering(force_empty=True)
collector = Collector(using=del_query.db)
collector.collect(del_query)
deleted, _rows_count = collector.delete()
# Clear the result cache, in case this QuerySet gets reused.
self._result_cache = None
return deleted, _rows_count
delete.alters_data = True
delete.queryset_only = True
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
self._for_write = True
query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs)
with transaction.atomic(using=self.db, savepoint=False):
rows = query.get_compiler(self.db).execute_sql(CURSOR)
self._result_cache = None
return rows
update.alters_data = True
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
"""
assert self.query.can_filter(), \
"Cannot update a query once a slice has been taken."
query = self.query.clone(sql.UpdateQuery)
query.add_update_fields(values)
self._result_cache = None
return query.get_compiler(self.db).execute_sql(CURSOR)
_update.alters_data = True
_update.queryset_only = False
def exists(self):
if self._result_cache is None:
return self.query.has_results(using=self.db)
return bool(self._result_cache)
def _prefetch_related_objects(self):
# This method can only be called once the result cache has been filled.
prefetch_related_objects(self._result_cache, self._prefetch_related_lookups)
self._prefetch_done = True
##################################################
# PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
##################################################
def raw(self, raw_query, params=None, translations=None, using=None):
if using is None:
using = self.db
return RawQuerySet(raw_query, model=self.model,
params=params, translations=translations,
using=using)
def _values(self, *fields):
clone = self._clone()
clone._fields = fields
query = clone.query
query.select_related = False
query.clear_deferred_loading()
query.clear_select_fields()
if query.group_by is True:
query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
query.set_group_by()
query.clear_select_fields()
if fields:
field_names = []
extra_names = []
annotation_names = []
if not query._extra and not query._annotations:
# Shortcut - if there are no extra or annotations, then
# the values() clause must be just field names.
field_names = list(fields)
else:
query.default_cols = False
for f in fields:
if f in query.extra_select:
extra_names.append(f)
elif f in query.annotation_select:
annotation_names.append(f)
else:
field_names.append(f)
query.set_extra_mask(extra_names)
query.set_annotation_mask(annotation_names)
else:
field_names = [f.attname for f in self.model._meta.concrete_fields]
query.values_select = field_names
query.add_fields(field_names, True)
return clone
def values(self, *fields):
clone = self._values(*fields)
clone._iterator_class = ValuesIterator
return clone
def values_list(self, *fields, **kwargs):
flat = kwargs.pop('flat', False)
if kwargs:
raise TypeError('Unexpected keyword arguments to values_list: %s'
% (list(kwargs),))
if flat and len(fields) > 1:
raise TypeError("'flat' is not valid when values_list is called with more than one field.")
clone = self._values(*fields)
clone._iterator_class = FlatValuesListIterator if flat else ValuesListIterator
return clone
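    # Illustrative sketch (hypothetical "Entry" model):
    #
    #     Entry.objects.values_list('id', 'headline')   # yields (1, 'First'), ...
    #     Entry.objects.values_list('id', flat=True)    # yields 1, 2, 3, ...
    #
    # flat=True is only valid with a single field, per the check above.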
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
"'kind' must be one of 'year', 'month' or 'day'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
return self.annotate(
datefield=Date(field_name, kind),
plain_field=F(field_name)
).values_list(
'datefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datefield')
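    # Illustrative sketch (hypothetical "Entry" model):
    #
    #     Entry.objects.dates('pub_date', 'month')
    #
    # yields distinct datetime.date objects, one per year/month combination
    # present in "pub_date", truncated to the first day of the month.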
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
"'kind' must be one of 'year', 'month', 'day', 'hour', 'minute' or 'second'."
assert order in ('ASC', 'DESC'), \
"'order' must be either 'ASC' or 'DESC'."
if settings.USE_TZ:
if tzinfo is None:
tzinfo = timezone.get_current_timezone()
else:
tzinfo = None
return self.annotate(
datetimefield=DateTime(field_name, kind, tzinfo),
plain_field=F(field_name)
).values_list(
'datetimefield', flat=True
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
def none(self):
"""
Returns an empty QuerySet.
"""
clone = self._clone()
clone.query.set_empty()
return clone
##################################################################
# PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
##################################################################
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
def _filter_or_exclude(self, negate, *args, **kwargs):
if args or kwargs:
assert self.query.can_filter(), \
"Cannot filter a query once a slice has been taken."
clone = self._clone()
if negate:
clone.query.add_q(~Q(*args, **kwargs))
else:
clone.query.add_q(Q(*args, **kwargs))
return clone
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
This exists to support framework features such as 'limit_choices_to',
and usually it will be more natural to use other methods.
"""
if isinstance(filter_obj, Q) or hasattr(filter_obj, 'add_to_query'):
clone = self._clone()
clone.query.add_q(filter_obj)
return clone
else:
return self._filter_or_exclude(None, **filter_obj)
def select_for_update(self, nowait=False):
"""
Returns a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
obj = self._clone()
obj._for_write = True
obj.query.select_for_update = True
obj.query.select_for_update_nowait = nowait
return obj
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
"""
obj = self._clone()
if fields == (None,):
obj.query.select_related = False
elif fields:
obj.query.add_select_related(fields)
else:
obj.query.select_related = True
return obj
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
"""
clone = self._clone()
if lookups == (None,):
clone._prefetch_related_lookups = []
else:
clone._prefetch_related_lookups.extend(lookups)
return clone
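    # Illustrative sketch (hypothetical "Pizza"/"Topping" models):
    #
    #     Pizza.objects.prefetch_related('toppings')
    #
    # runs one query for the pizzas and a single extra query for all of their
    # toppings, instead of one query per pizza; prefetch_related(None) clears
    # the accumulated lookups, as handled above.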
def annotate(self, *args, **kwargs):
"""
Return a query set in which the returned objects have been annotated
with extra data or aggregations.
"""
annotations = OrderedDict() # To preserve ordering of args
for arg in args:
# The default_alias property may raise a TypeError, so we use
# a try/except construct rather than hasattr in order to remain
# consistent between PY2 and PY3 (hasattr would swallow
# the TypeError on PY2).
try:
if arg.default_alias in kwargs:
raise ValueError("The named annotation '%s' conflicts with the "
"default name for another annotation."
% arg.default_alias)
except (AttributeError, TypeError):
raise TypeError("Complex annotations require an alias")
annotations[arg.default_alias] = arg
annotations.update(kwargs)
clone = self._clone()
names = self._fields
if names is None:
names = {f.name for f in self.model._meta.get_fields()}
for alias, annotation in annotations.items():
if alias in names:
raise ValueError("The annotation '%s' conflicts with a field on "
"the model." % alias)
clone.query.add_annotation(annotation, alias, is_summary=False)
for alias, annotation in clone.query.annotations.items():
if alias in annotations and annotation.contains_aggregate:
if clone._fields is None:
clone.query.group_by = True
else:
clone.query.set_group_by()
break
return clone
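    # Illustrative sketch (hypothetical "Author" model):
    #
    #     from django.db.models import Count
    #     Author.objects.annotate(num_books=Count('book'))
    #
    # Each returned Author then carries a "num_books" attribute; aggregate
    # annotations switch the query to grouping, as handled above.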
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
obj.query.clear_ordering(force_empty=False)
obj.query.add_ordering(*field_names)
return obj
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
obj = self._clone()
obj.query.add_distinct_fields(*field_names)
return obj
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
clone.query.add_extra(select, select_params, where, params, tables, order_by)
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
clone = self._clone()
if fields == (None,):
clone.query.clear_deferred_loading()
else:
clone.query.add_deferred_loading(fields)
return clone
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
if self._fields is not None:
raise TypeError("Cannot call only() after .values() or .values_list()")
if fields == (None,):
# Can only pass None to defer(), not only(), as the rest option.
# That won't stop people trying to do this, so let's be explicit.
raise TypeError("Cannot pass None as an argument to only().")
clone = self._clone()
clone.query.add_immediate_loading(fields)
return clone
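    # Illustrative sketch (hypothetical "Entry" model):
    #
    #     Entry.objects.defer('body')                 # load "body" lazily
    #     Entry.objects.only('headline', 'pub_date')  # load only these fields
    #
    # A deferred field triggers an extra query the first time it is accessed
    # on an instance.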
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
clone = self._clone()
clone._db = alias
return clone
###################################
# PUBLIC INTROSPECTION ATTRIBUTES #
###################################
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
return True
elif self.query.default_ordering and self.query.get_meta().ordering:
return True
else:
return False
ordered = property(ordered)
@property
def db(self):
"Return the database that will be used if this query is executed now"
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
###################
# PRIVATE METHODS #
###################
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
if using is None:
using = self.db
query = sql.InsertQuery(self.model)
query.insert_values(fields, objs, raw=raw)
return query.get_compiler(using=using).execute_sql(return_id)
_insert.alters_data = True
_insert.queryset_only = False
def _batched_insert(self, objs, fields, batch_size):
"""
        A helper method for bulk_create() that inserts the objects one batch
        at a time: the list is split into slices of at most batch_size
        objects and each slice is inserted with a separate query.
"""
if not objs:
return
ops = connections[self.db].ops
batch_size = (batch_size or max(ops.bulk_batch_size(fields, objs), 1))
for batch in [objs[i:i + batch_size]
for i in range(0, len(objs), batch_size)]:
self.model._base_manager._insert(batch, fields=fields,
using=self.db)
def _clone(self, **kwargs):
query = self.query.clone()
if self._sticky_filter:
query.filter_is_sticky = True
clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
clone._for_write = self._for_write
clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
clone._known_related_objects = self._known_related_objects
clone._iterator_class = self._iterator_class
clone._fields = self._fields
clone.__dict__.update(kwargs)
return clone
def _fetch_all(self):
if self._result_cache is None:
self._result_cache = list(self.iterator())
if self._prefetch_related_lookups and not self._prefetch_done:
self._prefetch_related_objects()
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
This doesn't return a clone of the current QuerySet (it returns
"self"). The method is only used internally and should be immediately
followed by a filter() that does create a clone.
"""
self._sticky_filter = True
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
set(self.query.annotation_select) != set(other.query.annotation_select)):
raise TypeError("Merging '%s' classes must involve the same values in each case."
% self.__class__.__name__)
def _merge_known_related_objects(self, other):
"""
Keep track of all known related objects from either QuerySet instance.
"""
for field, objects in other._known_related_objects.items():
self._known_related_objects.setdefault(field, {}).update(objects)
def _prepare(self):
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
return self
def _as_sql(self, connection):
"""
Returns the internal query's SQL and parameters (as a tuple).
"""
if self._fields is not None:
            # A values() queryset can only be used as a nested query
            # if it is set up to select only a single field.
if len(self._fields or self.model._meta.concrete_fields) > 1:
raise TypeError('Cannot use multi-field values as a filter value.')
clone = self._clone()
else:
clone = self.values('pk')
if clone._db is None or connection == connections[clone._db]:
return clone.query.get_compiler(connection=connection).as_nested_sql()
raise ValueError("Can't do subqueries with queries on different DBs.")
# When used as part of a nested query, a queryset will never be an "always
# empty" result.
value_annotation = True
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
        Checks if this QuerySet has any filtering going on. Note that this
        isn't equivalent to checking if all objects are present in the
        results; for example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
def is_compatible_query_object_type(self, opts, field):
"""
        Check that using this queryset as the rhs value for a lookup is
        allowed. The opts are the options of the relation's target we are
        querying against. For example, in .filter(author__in=Author.objects.all())
        opts would be Author's meta (from the author field) and self.model
        would be the model of the Author.objects.all() queryset (also Author).
        The field is the related field on the lhs side.
"""
# We trust that users of values() know what they are doing.
if self._fields is not None:
return True
return check_rel_lookup_compatibility(self.model, opts, field)
is_compatible_query_object_type.queryset_only = True
class InstanceCheckMeta(type):
def __instancecheck__(self, instance):
return instance.query.is_empty()
class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
"""
Marker class usable for checking if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
def __init__(self, *args, **kwargs):
raise TypeError("EmptyQuerySet can't be instantiated")
class RawQuerySet(object):
"""
Provides an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
translations=None, using=None, hints=None):
self.raw_query = raw_query
self.model = model
self._db = using
self._hints = hints or {}
self.query = query or sql.RawQuery(sql=raw_query, using=self.db, params=params)
self.params = params or ()
self.translations = translations or {}
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
model_init_names = [f.attname for f in self.model._meta.fields
if f.attname in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
model_init_order = [self.columns.index(fname) for fname in model_init_names]
return model_init_names, model_init_order, annotation_fields
def __iter__(self):
# Cache some things for performance reasons outside the loop.
db = self.db
compiler = connections[db].ops.compiler('SQLCompiler')(
self.query, connections[db], db
)
query = iter(self.query)
try:
model_init_names, model_init_pos, annotation_fields = self.resolve_model_init_order()
# Find out which model's fields are not present in the query.
skip = set()
for field in self.model._meta.fields:
if field.attname not in model_init_names:
skip.add(field.attname)
if skip:
if self.model._meta.pk.attname in skip:
raise InvalidQuery('Raw query must include the primary key')
model_cls = deferred_class_factory(self.model, skip)
else:
model_cls = self.model
fields = [self.model_fields.get(c) for c in self.columns]
converters = compiler.get_converters([
f.get_col(f.model._meta.db_table) if f else None for f in fields
])
for values in query:
if converters:
values = compiler.apply_converters(values, converters)
# Associate fields to values
model_init_values = [values[pos] for pos in model_init_pos]
instance = model_cls.from_db(db, model_init_names, model_init_values)
if annotation_fields:
for column, pos in annotation_fields:
setattr(instance, column, values[pos])
yield instance
finally:
# Done iterating the Query. If it has its own cursor, close it.
if hasattr(self.query, 'cursor') and self.query.cursor:
self.query.cursor.close()
def __repr__(self):
return "<RawQuerySet: %s>" % self.query
def __getitem__(self, k):
return list(self)[k]
@property
def db(self):
"Return the database that will be used if this query is executed now"
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
        Selects which database this RawQuerySet should execute its query against.
"""
return RawQuerySet(self.raw_query, model=self.model,
query=self.query.clone(using=alias),
params=self.params, translations=self.translations,
using=alias)
@property
def columns(self):
"""
A list of model field names in the order they'll appear in the
query results.
"""
if not hasattr(self, '_columns'):
self._columns = self.query.get_columns()
# Adjust any column names which don't match field names
for (query_name, model_name) in self.translations.items():
try:
index = self._columns.index(query_name)
self._columns[index] = model_name
except ValueError:
# Ignore translations for non-existent column names
pass
return self._columns
@property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
if not hasattr(self, '_model_fields'):
converter = connections[self.db].introspection.table_name_converter
self._model_fields = {}
for field in self.model._meta.fields:
name, column = field.get_attname_column()
self._model_fields[converter(column)] = field
return self._model_fields
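# Illustrative sketch (not part of the original module): RawQuerySet objects are
# normally obtained through Manager.raw() rather than instantiated directly. The
# model, table and column names below are hypothetical.
#
#     people = Person.objects.raw('SELECT * FROM myapp_person')
#     for p in people:
#         print(p.first_name)
#     # `translations` maps query column names to model field names:
#     people = Person.objects.raw('SELECT id, fname FROM myapp_person',
#                                 translations={'fname': 'first_name'})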
class Prefetch(object):
def __init__(self, lookup, queryset=None, to_attr=None):
# `prefetch_through` is the path we traverse to perform the prefetch.
self.prefetch_through = lookup
# `prefetch_to` is the path to the attribute that stores the result.
self.prefetch_to = lookup
if to_attr:
self.prefetch_to = LOOKUP_SEP.join(lookup.split(LOOKUP_SEP)[:-1] + [to_attr])
self.queryset = queryset
self.to_attr = to_attr
def add_prefix(self, prefix):
self.prefetch_through = LOOKUP_SEP.join([prefix, self.prefetch_through])
self.prefetch_to = LOOKUP_SEP.join([prefix, self.prefetch_to])
def get_current_prefetch_through(self, level):
return LOOKUP_SEP.join(self.prefetch_through.split(LOOKUP_SEP)[:level + 1])
def get_current_prefetch_to(self, level):
return LOOKUP_SEP.join(self.prefetch_to.split(LOOKUP_SEP)[:level + 1])
def get_current_to_attr(self, level):
parts = self.prefetch_to.split(LOOKUP_SEP)
to_attr = parts[level]
as_attr = self.to_attr and level == len(parts) - 1
return to_attr, as_attr
def get_current_queryset(self, level):
if self.get_current_prefetch_to(level) == self.prefetch_to:
return self.queryset
return None
def __eq__(self, other):
if isinstance(other, Prefetch):
return self.prefetch_to == other.prefetch_to
return False
def __hash__(self):
return hash(self.__class__) ^ hash(self.prefetch_to)
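# Illustrative sketch (not part of the original module): Prefetch objects are
# passed to QuerySet.prefetch_related() to customise the queryset used for a
# lookup or to store the results under a different attribute. The Author/Book
# models are hypothetical.
#
#     from django.db.models import Prefetch
#     authors = Author.objects.prefetch_related(
#         Prefetch('books',
#                  queryset=Book.objects.filter(published=True),
#                  to_attr='published_books'))
#     # each fetched author then carries author.published_books (a plain list)
#     # instead of populating the default author.books.all() cache.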
def normalize_prefetch_lookups(lookups, prefix=None):
"""
    Helper function that normalizes lookups into Prefetch objects.
"""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
lookup = Prefetch(lookup)
if prefix:
lookup.add_prefix(prefix)
ret.append(lookup)
return ret
def prefetch_related_objects(result_cache, related_lookups):
"""
    Helper function for the prefetch_related functionality.
    Populates the prefetched object caches for a list of results
    from a QuerySet.
"""
if len(result_cache) == 0:
return # nothing to do
related_lookups = normalize_prefetch_lookups(related_lookups)
# We need to be able to dynamically add to the list of prefetch_related
    # lookups that we look up (see below). So we need some bookkeeping to
# ensure we don't do duplicate work.
done_queries = {} # dictionary of things like 'foo__bar': [results]
auto_lookups = set() # we add to this as we go through.
followed_descriptors = set() # recursion protection
all_lookups = deque(related_lookups)
while all_lookups:
lookup = all_lookups.popleft()
if lookup.prefetch_to in done_queries:
if lookup.queryset:
raise ValueError("'%s' lookup was already seen with a different queryset. "
"You may need to adjust the ordering of your lookups." % lookup.prefetch_to)
continue
# Top level, the list of objects to decorate is the result cache
# from the primary QuerySet. It won't be for deeper levels.
obj_list = result_cache
through_attrs = lookup.prefetch_through.split(LOOKUP_SEP)
for level, through_attr in enumerate(through_attrs):
# Prepare main instances
if len(obj_list) == 0:
break
prefetch_to = lookup.get_current_prefetch_to(level)
if prefetch_to in done_queries:
# Skip any prefetching, and any object preparation
obj_list = done_queries[prefetch_to]
continue
# Prepare objects:
good_objects = True
for obj in obj_list:
# Since prefetching can re-use instances, it is possible to have
# the same instance multiple times in obj_list, so obj might
# already be prepared.
if not hasattr(obj, '_prefetched_objects_cache'):
try:
obj._prefetched_objects_cache = {}
except AttributeError:
# Must be in a QuerySet subclass that is not returning
# Model instances, either in Django or 3rd
# party. prefetch_related() doesn't make sense, so quit
# now.
good_objects = False
break
if not good_objects:
break
# Descend down tree
# We assume that objects retrieved are homogeneous (which is the premise
            # of prefetch_related), so what applies to the first object applies to all.
first_obj = obj_list[0]
prefetcher, descriptor, attr_found, is_fetched = get_prefetcher(first_obj, through_attr)
if not attr_found:
raise AttributeError("Cannot find '%s' on %s object, '%s' is an invalid "
"parameter to prefetch_related()" %
(through_attr, first_obj.__class__.__name__, lookup.prefetch_through))
if level == len(through_attrs) - 1 and prefetcher is None:
# Last one, this *must* resolve to something that supports
# prefetching, otherwise there is no point adding it and the
# developer asking for it has made a mistake.
raise ValueError("'%s' does not resolve to an item that supports "
"prefetching - this is an invalid parameter to "
"prefetch_related()." % lookup.prefetch_through)
if prefetcher is not None and not is_fetched:
obj_list, additional_lookups = prefetch_one_level(obj_list, prefetcher, lookup, level)
# We need to ensure we don't keep adding lookups from the
# same relationships to stop infinite recursion. So, if we
# are already on an automatically added lookup, don't add
# the new lookups from relationships we've seen already.
if not (lookup in auto_lookups and descriptor in followed_descriptors):
done_queries[prefetch_to] = obj_list
new_lookups = normalize_prefetch_lookups(additional_lookups, prefetch_to)
auto_lookups.update(new_lookups)
all_lookups.extendleft(new_lookups)
followed_descriptors.add(descriptor)
else:
# Either a singly related object that has already been fetched
# (e.g. via select_related), or hopefully some other property
# that doesn't support prefetching but needs to be traversed.
# We replace the current list of parent objects with the list
# of related objects, filtering out empty or missing values so
# that we can continue with nullable or reverse relations.
new_obj_list = []
for obj in obj_list:
try:
new_obj = getattr(obj, through_attr)
except exceptions.ObjectDoesNotExist:
continue
if new_obj is None:
continue
# We special-case `list` rather than something more generic
# like `Iterable` because we don't want to accidentally match
# user models that define __iter__.
if isinstance(new_obj, list):
new_obj_list.extend(new_obj)
else:
new_obj_list.append(new_obj)
obj_list = new_obj_list
def get_prefetcher(instance, attr):
"""
For the attribute 'attr' on the given instance, finds
an object that has a get_prefetch_queryset().
    Returns a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
a boolean that is True if the attribute has already been fetched)
"""
prefetcher = None
is_fetched = False
# For singly related objects, we have to avoid getting the attribute
# from the object, as this will trigger the query. So we first try
# on the class, in order to get the descriptor object.
rel_obj_descriptor = getattr(instance.__class__, attr, None)
if rel_obj_descriptor is None:
attr_found = hasattr(instance, attr)
else:
attr_found = True
if rel_obj_descriptor:
# singly related object, descriptor object has the
# get_prefetch_queryset() method.
if hasattr(rel_obj_descriptor, 'get_prefetch_queryset'):
prefetcher = rel_obj_descriptor
if rel_obj_descriptor.is_cached(instance):
is_fetched = True
else:
# descriptor doesn't support prefetching, so we go ahead and get
# the attribute on the instance rather than the class to
# support many related managers
rel_obj = getattr(instance, attr)
if hasattr(rel_obj, 'get_prefetch_queryset'):
prefetcher = rel_obj
return prefetcher, rel_obj_descriptor, attr_found, is_fetched
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
    Helper function for prefetch_related_objects().
    Runs prefetches on all instances using the prefetcher object,
    assigning results to the relevant caches on each instance.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:
# (queryset of instances of self.model that are related to passed in instances,
# callable that gets value to be matched for returned instances,
# callable that gets value to be matched for passed in instances,
# boolean that is True for singly related objects,
# cache name to assign to).
# The 'values to be matched' must be hashable as they will be used
# in a dictionary.
rel_qs, rel_obj_attr, instance_attr, single, cache_name = (
prefetcher.get_prefetch_queryset(instances, lookup.get_current_queryset(level)))
# We have to handle the possibility that the QuerySet we just got back
# contains some prefetch_related lookups. We don't want to trigger the
# prefetch_related functionality by evaluating the query. Rather, we need
# to merge in the prefetch_related lookups.
# Copy the lookups in case it is a Prefetch object which could be reused
# later (happens in nested prefetch_related).
additional_lookups = [
copy.copy(additional_lookup) for additional_lookup
in getattr(rel_qs, '_prefetch_related_lookups', [])
]
if additional_lookups:
# Don't need to clone because the manager should have given us a fresh
# instance, so we access an internal instead of using public interface
# for performance reasons.
rel_qs._prefetch_related_lookups = []
all_related_objects = list(rel_qs)
rel_obj_cache = {}
for rel_obj in all_related_objects:
rel_attr_val = rel_obj_attr(rel_obj)
rel_obj_cache.setdefault(rel_attr_val, []).append(rel_obj)
for obj in instances:
instance_attr_val = instance_attr(obj)
vals = rel_obj_cache.get(instance_attr_val, [])
to_attr, as_attr = lookup.get_current_to_attr(level)
if single:
val = vals[0] if vals else None
to_attr = to_attr if as_attr else cache_name
setattr(obj, to_attr, val)
else:
if as_attr:
setattr(obj, to_attr, vals)
else:
# Cache in the QuerySet.all().
qs = getattr(obj, to_attr).all()
qs._result_cache = vals
# We don't want the individual qs doing prefetch_related now,
# since we have merged this into the current work.
qs._prefetch_done = True
obj._prefetched_objects_cache[cache_name] = qs
return all_related_objects, additional_lookups
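# Illustrative sketch (not part of the original module): the contract spelled out
# above means any object acting as a prefetcher only has to expose a
# get_prefetch_queryset() method returning a five-tuple. A minimal, hypothetical
# shape (model and attribute names are made up) would be:
#
#     def get_prefetch_queryset(self, instances, queryset=None):
#         qs = queryset if queryset is not None else RelatedModel.objects.all()
#         qs = qs.filter(parent_id__in={obj.pk for obj in instances})
#         return (qs,                                  # related objects to fetch
#                 lambda rel_obj: rel_obj.parent_id,   # value matched on related rows
#                 lambda obj: obj.pk,                  # value matched on the instances
#                 False,                               # not a single-valued relation
#                 'relatedmodel_set')                  # cache name to assign to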
class RelatedPopulator(object):
"""
RelatedPopulator is used for select_related() object instantiation.
The idea is that each select_related() model will be populated by a
different RelatedPopulator instance. The RelatedPopulator instances get
klass_info and select (computed in SQLCompiler) plus the used db as
input for initialization. That data is used to compute which columns
to use, how to instantiate the model, and how to populate the links
between the objects.
The actual creation of the objects is done in populate() method. This
method gets row and from_obj as input and populates the select_related()
model instance.
"""
def __init__(self, klass_info, select, db):
self.db = db
# Pre-compute needed attributes. The attributes are:
# - model_cls: the possibly deferred model class to instantiate
# - either:
# - cols_start, cols_end: usually the columns in the row are
# in the same order model_cls.__init__ expects them, so we
# can instantiate by model_cls(*row[cols_start:cols_end])
# - reorder_for_init: When select_related descends to a child
# class, then we want to reuse the already selected parent
# data. However, in this case the parent data isn't necessarily
# in the same order that Model.__init__ expects it to be, so
# we have to reorder the parent data. The reorder_for_init
# attribute contains a function used to reorder the field data
# in the order __init__ expects it.
# - pk_idx: the index of the primary key field in the reordered
# model data. Used to check if a related object exists at all.
# - init_list: the field attnames fetched from the database. For
# deferred models this isn't the same as all attnames of the
# model's fields.
# - related_populators: a list of RelatedPopulator instances if
# select_related() descends to related models from this model.
# - cache_name, reverse_cache_name: the names to use for setattr
# when assigning the fetched object to the from_obj. If the
# reverse_cache_name is set, then we also set the reverse link.
select_fields = klass_info['select_fields']
from_parent = klass_info['from_parent']
if not from_parent:
self.cols_start = select_fields[0]
self.cols_end = select_fields[-1] + 1
self.init_list = [
f[0].target.attname for f in select[self.cols_start:self.cols_end]
]
self.reorder_for_init = None
else:
model_init_attnames = [
f.attname for f in klass_info['model']._meta.concrete_fields
]
reorder_map = []
for idx in select_fields:
field = select[idx][0].target
init_pos = model_init_attnames.index(field.attname)
reorder_map.append((init_pos, field.attname, idx))
reorder_map.sort()
self.init_list = [v[1] for v in reorder_map]
pos_list = [row_pos for _, _, row_pos in reorder_map]
def reorder_for_init(row):
return [row[row_pos] for row_pos in pos_list]
self.reorder_for_init = reorder_for_init
self.model_cls = self.get_deferred_cls(klass_info, self.init_list)
self.pk_idx = self.init_list.index(self.model_cls._meta.pk.attname)
self.related_populators = get_related_populators(klass_info, select, self.db)
field = klass_info['field']
reverse = klass_info['reverse']
self.reverse_cache_name = None
if reverse:
self.cache_name = field.remote_field.get_cache_name()
self.reverse_cache_name = field.get_cache_name()
else:
self.cache_name = field.get_cache_name()
if field.unique:
self.reverse_cache_name = field.remote_field.get_cache_name()
def get_deferred_cls(self, klass_info, init_list):
model_cls = klass_info['model']
if len(init_list) != len(model_cls._meta.concrete_fields):
init_set = set(init_list)
skip = [
f.attname for f in model_cls._meta.concrete_fields
if f.attname not in init_set
]
model_cls = deferred_class_factory(model_cls, skip)
return model_cls
def populate(self, row, from_obj):
if self.reorder_for_init:
obj_data = self.reorder_for_init(row)
else:
obj_data = row[self.cols_start:self.cols_end]
if obj_data[self.pk_idx] is None:
obj = None
else:
obj = self.model_cls.from_db(self.db, self.init_list, obj_data)
if obj and self.related_populators:
for rel_iter in self.related_populators:
rel_iter.populate(row, obj)
setattr(from_obj, self.cache_name, obj)
if obj and self.reverse_cache_name:
setattr(obj, self.reverse_cache_name, from_obj)
def get_related_populators(klass_info, select, db):
iterators = []
related_klass_infos = klass_info.get('related_klass_infos', [])
for rel_klass_info in related_klass_infos:
rel_cls = RelatedPopulator(rel_klass_info, select, db)
iterators.append(rel_cls)
return iterators
| bsd-3-clause |
zhukaixy/kbengine | kbe/res/scripts/common/Lib/quopri.py | 69 | 7250 | #! /usr/bin/env python3
"""Conversions to/from quoted-printable transport encoding as per RFC 1521."""
# (Dec 1991 version).
__all__ = ["encode", "decode", "encodestring", "decodestring"]
ESCAPE = b'='
MAXLINESIZE = 76
HEX = b'0123456789ABCDEF'
EMPTYSTRING = b''
try:
from binascii import a2b_qp, b2a_qp
except ImportError:
a2b_qp = None
b2a_qp = None
def needsquoting(c, quotetabs, header):
"""Decide whether a particular byte ordinal needs to be quoted.
The 'quotetabs' flag indicates whether embedded tabs and spaces should be
quoted. Note that line-ending tabs and spaces are always encoded, as per
RFC 1521.
"""
assert isinstance(c, bytes)
if c in b' \t':
return quotetabs
# if header, we have to escape _ because _ is used to escape space
if c == b'_':
return header
return c == ESCAPE or not (b' ' <= c <= b'~')
def quote(c):
"""Quote a single character."""
assert isinstance(c, bytes) and len(c)==1
c = ord(c)
return ESCAPE + bytes((HEX[c//16], HEX[c%16]))
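# Illustrative example (not part of the original module): with the default
# flags, only the escape character and bytes outside the printable ASCII range
# need quoting, and quote() expands a byte to '=' plus two hex digits.
#
#     needsquoting(b'=', False, False)   # -> True
#     needsquoting(b'a', False, False)   # -> False
#     quote(b'=')                        # -> b'=3D'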
def encode(input, output, quotetabs, header=False):
"""Read 'input', apply quoted-printable encoding, and write to 'output'.
'input' and 'output' are binary file objects. The 'quotetabs' flag
indicates whether embedded tabs and spaces should be quoted. Note that
line-ending tabs and spaces are always encoded, as per RFC 1521.
The 'header' flag indicates whether we are encoding spaces as _ as per RFC
1522."""
if b2a_qp is not None:
data = input.read()
odata = b2a_qp(data, quotetabs=quotetabs, header=header)
output.write(odata)
return
def write(s, output=output, lineEnd=b'\n'):
# RFC 1521 requires that the line ending in a space or tab must have
# that trailing character encoded.
if s and s[-1:] in b' \t':
output.write(s[:-1] + quote(s[-1:]) + lineEnd)
elif s == b'.':
output.write(quote(s) + lineEnd)
else:
output.write(s + lineEnd)
prevline = None
while 1:
line = input.readline()
if not line:
break
outline = []
# Strip off any readline induced trailing newline
stripped = b''
if line[-1:] == b'\n':
line = line[:-1]
stripped = b'\n'
# Calculate the un-length-limited encoded line
for c in line:
c = bytes((c,))
if needsquoting(c, quotetabs, header):
c = quote(c)
if header and c == b' ':
outline.append(b'_')
else:
outline.append(c)
# First, write out the previous line
if prevline is not None:
write(prevline)
# Now see if we need any soft line breaks because of RFC-imposed
# length limitations. Then do the thisline->prevline dance.
thisline = EMPTYSTRING.join(outline)
while len(thisline) > MAXLINESIZE:
# Don't forget to include the soft line break `=' sign in the
# length calculation!
write(thisline[:MAXLINESIZE-1], lineEnd=b'=\n')
thisline = thisline[MAXLINESIZE-1:]
# Write out the current line
prevline = thisline
# Write out the last line, without a trailing newline
if prevline is not None:
write(prevline, lineEnd=stripped)
def encodestring(s, quotetabs=False, header=False):
if b2a_qp is not None:
return b2a_qp(s, quotetabs=quotetabs, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
encode(infp, outfp, quotetabs, header)
return outfp.getvalue()
def decode(input, output, header=False):
"""Read 'input', apply quoted-printable decoding, and write to 'output'.
'input' and 'output' are binary file objects.
If 'header' is true, decode underscore as space (per RFC 1522)."""
if a2b_qp is not None:
data = input.read()
odata = a2b_qp(data, header=header)
output.write(odata)
return
new = b''
while 1:
line = input.readline()
if not line: break
i, n = 0, len(line)
if n > 0 and line[n-1:n] == b'\n':
partial = 0; n = n-1
# Strip trailing whitespace
while n > 0 and line[n-1:n] in b" \t\r":
n = n-1
else:
partial = 1
while i < n:
c = line[i:i+1]
if c == b'_' and header:
new = new + b' '; i = i+1
elif c != ESCAPE:
new = new + c; i = i+1
elif i+1 == n and not partial:
partial = 1; break
elif i+1 < n and line[i+1] == ESCAPE:
new = new + ESCAPE; i = i+2
elif i+2 < n and ishex(line[i+1:i+2]) and ishex(line[i+2:i+3]):
new = new + bytes((unhex(line[i+1:i+3]),)); i = i+3
else: # Bad escape sequence -- leave it in
new = new + c; i = i+1
if not partial:
output.write(new + b'\n')
new = b''
if new:
output.write(new)
def decodestring(s, header=False):
if a2b_qp is not None:
return a2b_qp(s, header=header)
from io import BytesIO
infp = BytesIO(s)
outfp = BytesIO()
decode(infp, outfp, header=header)
return outfp.getvalue()
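# Illustrative example (not part of the original module): the string helpers
# round-trip bytes through the quoted-printable encoding.
#
#     encodestring(b'caf\xe9 = coffee\n')   # -> b'caf=E9 =3D coffee\n'
#     decodestring(b'caf=E9 =3D coffee\n')  # -> b'caf\xe9 = coffee\n'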
# Other helper functions
def ishex(c):
"""Return true if the byte ordinal 'c' is a hexadecimal digit in ASCII."""
assert isinstance(c, bytes)
return b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F'
def unhex(s):
"""Get the integer value of a hexadecimal number."""
bits = 0
for c in s:
c = bytes((c,))
if b'0' <= c <= b'9':
i = ord('0')
elif b'a' <= c <= b'f':
i = ord('a')-10
elif b'A' <= c <= b'F':
i = ord(b'A')-10
else:
assert False, "non-hex digit "+repr(c)
bits = bits*16 + (ord(c) - i)
return bits
def main():
import sys
import getopt
try:
opts, args = getopt.getopt(sys.argv[1:], 'td')
except getopt.error as msg:
sys.stdout = sys.stderr
print(msg)
print("usage: quopri [-t | -d] [file] ...")
print("-t: quote tabs")
print("-d: decode; default encode")
sys.exit(2)
deco = 0
tabs = 0
for o, a in opts:
if o == '-t': tabs = 1
if o == '-d': deco = 1
if tabs and deco:
sys.stdout = sys.stderr
print("-t and -d are mutually exclusive")
sys.exit(2)
if not args: args = ['-']
sts = 0
for file in args:
if file == '-':
fp = sys.stdin.buffer
else:
try:
fp = open(file, "rb")
except OSError as msg:
sys.stderr.write("%s: can't open (%s)\n" % (file, msg))
sts = 1
continue
try:
if deco:
decode(fp, sys.stdout.buffer)
else:
encode(fp, sys.stdout.buffer, tabs)
finally:
if file != '-':
fp.close()
if sts:
sys.exit(sts)
if __name__ == '__main__':
main()
| lgpl-3.0 |
CyanogenMod/android_kernel_motorola_msm8960-common | tools/perf/python/twatch.py | 3213 | 1338 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <[email protected]>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
                       sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
| gpl-2.0 |
stephenmcd/drawnby | core/migrations/0004_auto__chg_field_drawing_data.py | 2 | 4403 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Changing field 'Drawing.data'
db.alter_column('core_drawing', 'data', self.gf('django.db.models.fields.TextField')(default=''))
def backwards(self, orm):
# Changing field 'Drawing.data'
db.alter_column('core_drawing', 'data', self.gf('django.db.models.fields.TextField')(null=True))
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.drawing': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Drawing'},
'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'db_index': 'True', 'max_length': '50', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'})
}
}
complete_apps = ['core']
| bsd-2-clause |
Srisai85/scipy | scipy/optimize/tests/test_cobyla.py | 100 | 3562 | from __future__ import division, print_function, absolute_import
import math
import numpy as np
from numpy.testing import assert_allclose, TestCase, run_module_suite, \
assert_
from scipy.optimize import fmin_cobyla, minimize
class TestCobyla(TestCase):
def setUp(self):
self.x0 = [4.95, 0.66]
self.solution = [math.sqrt(25 - (2.0/3)**2), 2.0/3]
self.opts = {'disp': False, 'rhobeg': 1, 'tol': 1e-5,
'maxiter': 100}
def fun(self, x):
return x[0]**2 + abs(x[1])**3
def con1(self, x):
return x[0]**2 + x[1]**2 - 25
def con2(self, x):
return -self.con1(x)
def test_simple(self):
x = fmin_cobyla(self.fun, self.x0, [self.con1, self.con2], rhobeg=1,
rhoend=1e-5, iprint=0, maxfun=100)
assert_allclose(x, self.solution, atol=1e-4)
def test_minimize_simple(self):
# Minimize with method='COBYLA'
cons = ({'type': 'ineq', 'fun': self.con1},
{'type': 'ineq', 'fun': self.con2})
sol = minimize(self.fun, self.x0, method='cobyla', constraints=cons,
options=self.opts)
assert_allclose(sol.x, self.solution, atol=1e-4)
assert_(sol.success, sol.message)
assert_(sol.maxcv < 1e-5, sol)
assert_(sol.nfev < 70, sol)
assert_(sol.fun < self.fun(self.solution) + 1e-3, sol)
def test_minimize_constraint_violation(self):
np.random.seed(1234)
pb = np.random.rand(10, 10)
spread = np.random.rand(10)
def p(w):
return pb.dot(w)
def f(w):
return -(w * spread).sum()
def c1(w):
return 500 - abs(p(w)).sum()
def c2(w):
return 5 - abs(p(w).sum())
def c3(w):
return 5 - abs(p(w)).max()
cons = ({'type': 'ineq', 'fun': c1},
{'type': 'ineq', 'fun': c2},
{'type': 'ineq', 'fun': c3})
w0 = np.zeros((10, 1))
sol = minimize(f, w0, method='cobyla', constraints=cons,
options={'catol': 1e-6})
assert_(sol.maxcv > 1e-6)
assert_(not sol.success)
def test_vector_constraints():
# test that fmin_cobyla and minimize can take a combination
# of constraints, some returning a number and others an array
def fun(x):
return (x[0] - 1)**2 + (x[1] - 2.5)**2
def fmin(x):
return fun(x) - 1
def cons1(x):
a = np.array([[1, -2, 2], [-1, -2, 6], [-1, 2, 2]])
return np.array([a[i, 0] * x[0] + a[i, 1] * x[1] +
a[i, 2] for i in range(len(a))])
def cons2(x):
return x # identity, acts as bounds x > 0
x0 = np.array([2, 0])
cons_list = [fun, cons1, cons2]
xsol = [1.4, 1.7]
fsol = 0.8
# testing fmin_cobyla
sol = fmin_cobyla(fun, x0, cons_list, rhoend=1e-5, iprint=0)
assert_allclose(sol, xsol, atol=1e-4)
sol = fmin_cobyla(fun, x0, fmin, rhoend=1e-5, iprint=0)
assert_allclose(fun(sol), 1, atol=1e-4)
# testing minimize
constraints = [{'type': 'ineq', 'fun': cons} for cons in cons_list]
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.x, xsol, atol=1e-4)
assert_(sol.success, sol.message)
assert_allclose(sol.fun, fsol, atol=1e-4)
constraints = {'type': 'ineq', 'fun': fmin}
sol = minimize(fun, x0, constraints=constraints, tol=1e-5)
assert_allclose(sol.fun, 1, atol=1e-4)
if __name__ == "__main__":
run_module_suite()
| bsd-3-clause |
a-parhom/edx-platform | openedx/features/course_experience/utils.py | 2 | 6659 | """
Common utilities for the course experience, including course outline.
"""
from completion.models import BlockCompletion
from lms.djangoapps.course_api.blocks.api import get_blocks
from lms.djangoapps.course_blocks.utils import get_student_module_as_dict
from opaque_keys.edx.keys import CourseKey
from openedx.core.lib.cache_utils import request_cached
from xmodule.modulestore.django import modulestore
@request_cached()
def get_course_outline_block_tree(request, course_id, user=None):
"""
Returns the root block of the course outline, with children as blocks.
"""
assert user is None or user.is_authenticated
def populate_children(block, all_blocks):
"""
Replace each child id with the full block for the child.
Given a block, replaces each id in its children array with the full
representation of that child, which will be looked up by id in the
passed all_blocks dict. Recursively do the same replacement for children
of those children.
"""
children = block.get('children', [])
for i in range(len(children)):
child_id = block['children'][i]
child_detail = populate_children(all_blocks[child_id], all_blocks)
block['children'][i] = child_detail
return block
def set_last_accessed_default(block):
"""
        Set default of False for resume_block and complete on all blocks.
"""
block['resume_block'] = False
block['complete'] = False
for child in block.get('children', []):
set_last_accessed_default(child)
def mark_blocks_completed(block, user, course_key):
"""
        Walk the course tree, marking block completion.
        Mark the most recently completed block as 'resume_block'.
"""
last_completed_child_position = BlockCompletion.get_latest_block_completed(user, course_key)
if last_completed_child_position:
# Mutex w/ NOT 'course_block_completions'
recurse_mark_complete(
course_block_completions=BlockCompletion.get_course_completions(user, course_key),
latest_completion=last_completed_child_position,
block=block
)
def recurse_mark_complete(course_block_completions, latest_completion, block):
"""
        Helper function to walk the course tree dict, marking blocks as
        'complete' and 'resume_block'. If all of a block's children are
        complete, the parent block is marked complete; ancestors of the most
        recently completed block are marked as 'resume_block'.
:param course_block_completions: dict[course_completion_object] = completion_value
:param latest_completion: course_completion_object
:param block: course_outline_root_block block object or child block
:return:
block: course_outline_root_block block object or child block
"""
block_key = block.serializer.instance
if course_block_completions.get(block_key):
block['complete'] = True
if block_key == latest_completion.full_block_key:
block['resume_block'] = True
if block.get('children'):
for idx in range(len(block['children'])):
recurse_mark_complete(
course_block_completions,
latest_completion,
block=block['children'][idx]
)
if block['children'][idx]['resume_block'] is True:
block['resume_block'] = True
completable_blocks = [child for child in block['children']
if child['type'] != 'discussion']
if len([child['complete'] for child in block['children']
if child['complete']]) == len(completable_blocks):
block['complete'] = True
def mark_last_accessed(user, course_key, block):
"""
Recursively marks the branch to the last accessed block.
"""
block_key = block.serializer.instance
student_module_dict = get_student_module_as_dict(user, course_key, block_key)
last_accessed_child_position = student_module_dict.get('position')
if last_accessed_child_position and block.get('children'):
block['resume_block'] = True
if last_accessed_child_position <= len(block['children']):
last_accessed_child_block = block['children'][last_accessed_child_position - 1]
last_accessed_child_block['resume_block'] = True
mark_last_accessed(user, course_key, last_accessed_child_block)
else:
# We should be using an id in place of position for last accessed.
# However, while using position, if the child block is no longer accessible
# we'll use the last child.
block['children'][-1]['resume_block'] = True
course_key = CourseKey.from_string(course_id)
course_usage_key = modulestore().make_course_usage_key(course_key)
# Deeper query for course tree traversing/marking complete
# and last completed block
block_types_filter = [
'course',
'chapter',
'sequential',
'vertical',
'html',
'problem',
'video',
'discussion',
'drag-and-drop-v2',
'poll',
'word_cloud'
]
all_blocks = get_blocks(
request,
course_usage_key,
user=request.user,
nav_depth=3,
requested_fields=[
'children',
'display_name',
'type',
'due',
'graded',
'special_exam_info',
'show_gated_sections',
'format'
],
block_types_filter=block_types_filter
)
course_outline_root_block = all_blocks['blocks'].get(all_blocks['root'], None)
if course_outline_root_block:
populate_children(course_outline_root_block, all_blocks['blocks'])
if user:
set_last_accessed_default(course_outline_root_block)
mark_blocks_completed(
block=course_outline_root_block,
user=request.user,
course_key=course_key
)
return course_outline_root_block
def get_resume_block(block):
"""
Gets the deepest block marked as 'resume_block'.
"""
if not block['resume_block']:
return None
if not block.get('children'):
return block
for child in block['children']:
resume_block = get_resume_block(child)
if resume_block:
return resume_block
return block
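# Illustrative sketch (not part of the original module): typical callers build the
# outline once and then locate the block to resume from; `request` and `course_id`
# are assumed to come from the surrounding view code.
#
#     course_block_tree = get_course_outline_block_tree(request, course_id, request.user)
#     resume_block = get_resume_block(course_block_tree) if course_block_tree else None
#     # resume_block (if any) is the deepest block flagged 'resume_block'; its
#     # 'display_name' and 'type' fields (requested above) can drive a resume link.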
| agpl-3.0 |
tacaswell/bokeh | bokeh/io.py | 39 | 14856 | #-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2015, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Functions for configuring Bokeh output.
'''
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
# Stdlib imports
import logging
logger = logging.getLogger(__name__)
import io, itertools, os, warnings
# Third-party imports
# Bokeh imports
from . import browserlib
from .document import Document
from .embed import notebook_div, file_html, autoload_server
from .models import Widget
from .models.plots import GridPlot
from .models.widgets.layouts import HBox, VBox, VBoxForm
from .state import State
from .util.notebook import load_notebook, publish_display_data
from .util.string import decode_utf8
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
_new_param = {'tab': 2, 'window': 1}
_state = State()
#-----------------------------------------------------------------------------
# Local utilities
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
def output_file(filename, title="Bokeh Plot", autosave=False, mode="inline", root_dir=None):
''' Configure the default output state to generate output saved
to a file when :func:`show` is called.
Args:
filename (str) : a filename for saving the HTML document
title (str, optional) : a title for the HTML document (default: "Bokeh Plot")
autosave (bool, optional) : whether to automatically save (default: False)
If True, then Bokeh plotting APIs may opt to automatically
save the file more frequently (e.g., after any plotting
command). If False, then the file is only saved upon calling
:func:`show` or :func:`save`.
mode (str, optional) : how to include BokehJS (default: ``'inline'``)
One of: ``'inline'``, ``'cdn'``, ``'relative(-dev)'`` or
``'absolute(-dev)'``. See :class:`bokeh.resources.Resources` for more details.
root_dir (str, optional) : root directory to use for 'absolute' resources. (default: None)
This value is ignored for other resource types, e.g. ``INLINE`` or
``CDN``.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
This output file will be overwritten on every save, e.g., each time
show() or save() is invoked, or any time a Bokeh plotting API
causes a save, if ``autosave`` is True.
'''
_state.output_file(
filename,
title=title,
autosave=autosave,
mode=mode,
root_dir=root_dir
)
def output_notebook(url=None, docname=None, session=None, name=None,
resources=None, verbose=False, hide_banner=False):
''' Configure the default output state to generate output in
Jupyter/IPython notebook cells when :func:`show` is called.
Args:
url (str, optional) : URL of the Bokeh server (default: "default")
If "default", then ``session.DEFAULT_SERVER_URL`` is used.
docname (str) : Name of document to push on Bokeh server (default: None)
Any existing documents with the same name will be overwritten.
session (Session, optional) : An explicit session to use (default: None)
If None, a new default session is created.
name (str, optional) : A name for the session (default: None)
If None, the server URL is used as the name
resources (Resource, optional) :
How and where to load BokehJS from (default: INLINE)
verbose (bool, optional) :
whether to display detailed BokehJS banner (default: False)
hide_banner (bool, optional):
whether to hide the Bokeh banner (default: False)
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
'''
load_notebook(resources, verbose, hide_banner)
_state.output_notebook(
url=url, docname=docname, session=session, name=name
)
def output_server(docname, session=None, url="default", name=None, clear=True):
''' Configure the default output state to generate output that gets
pushed to a bokeh-server when :func:`show` or :func:`push` is called.
Args:
docname (str) : Name of document to push on Bokeh server
Any existing documents with the same name will be overwritten.
session (Session, optional) : An explicit session to use (default: None)
If None, a new default session is created.
url (str, optional) : URL of the Bokeh server (default: "default")
If "default", then ``session.DEFAULT_SERVER_URL`` is used.
name (str, optional) : A name for the session (default: None)
If None, the server URL is used as the name
clear (bool, optional) : Whether to clear the document (default: True)
If True, an existing server document will be cleared of any
existing objects.
Returns:
None
.. note::
Generally, this should be called at the beginning of an interactive
session or the top of a script.
.. warning::
Calling this function will replace any existing default session.
'''
_state.output_server(
docname, session=session, url=url, name=name, clear=clear
)
def curdoc():
''' Return the document for the current default state.
Returns:
doc : the current default document object.
.. note::
When using this API form within the server (e.g. in a Bokeh app), the
appropriate document from the request context is returned, rather than
the standard default global state. Doing so allows the same code using
curdoc() to function correctly whether it is being run inside a server
or not.
'''
try:
from flask import request
doc = request.bokeh_server_document
logger.debug("curdoc() returning Document from flask request context")
return doc
except (ImportError, RuntimeError, AttributeError):
return _state.document
def cursession():
''' Return the session for the current default state, if there is one.
Returns:
the current default Session object (or None)
'''
return _state.session
def show(obj, browser=None, new="tab"):
''' Immediately display a plot object.
In an IPython/Jupyter notebook, the output is displayed in an output
cell. Otherwise, a browser window or tab is autoraised to display the
plot object.
If both a server session and notebook output have been configured on
the default output state then the notebook output will be generated to
load the plot from that server session.
Args:
obj (Widget/Plot object) : a plot object to display
browser (str, optional) : browser to show with (default: None)
For systems that support it, the **browser** argument allows
specifying which browser to display in, e.g. "safari", "firefox",
"opera", "windows-default" (see the ``webbrowser`` module
documentation in the standard lib for more details).
new (str, optional) : new file output mode (default: "tab")
For file-based output, opens or raises the browser window
showing the current output file. If **new** is 'tab', then
opens a new tab. If **new** is 'window', then opens a new window.
Returns:
None
.. note::
The ``browser`` and ``new`` parameters are ignored when showing in
an IPython/Jupyter notebook.
'''
_show_with_state(obj, _state, browser, new)
def _show_with_state(obj, state, browser, new):
controller = browserlib.get_browser_controller(browser=browser)
if state.notebook:
_show_notebook_with_state(obj, state)
elif state.session:
_show_server_with_state(obj, state, new, controller)
if state.file:
_show_file_with_state(obj, state, new, controller)
def _show_file_with_state(obj, state, new, controller):
save(obj, state=state)
controller.open("file://" + os.path.abspath(state.file['filename']), new=_new_param[new])
def _show_notebook_with_state(obj, state):
if state.session:
push(state=state)
snippet = autoload_server(obj, state.session)
publish_display_data({'text/html': snippet})
else:
publish_display_data({'text/html': notebook_div(obj)})
def _show_server_with_state(obj, state, new, controller):
push(state=state)
controller.open(state.session.object_link(state.document.context), new=_new_param[new])
def save(obj, filename=None, resources=None, title=None, state=None):
''' Save an HTML file with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``filename``, ``resources``, or ``title`` if they
are not provided.
Args:
obj (Document or Widget/Plot object) : a plot object to save
filename (str, optional) : filename to save document under (default: None)
If None, use the default state configuration, otherwise raise a
``RuntimeError``.
resources (Resources, optional) : A Resources config to use (default: None)
If None, use the default state configuration, if there is one.
otherwise use ``resources.INLINE``.
title (str, optional) : a title for the HTML document (default: None)
If None, use the default state title value, if there is one.
Otherwise, use "Bokeh Plot"
Returns:
None
Raises:
RuntimeError
'''
if state is None:
state = _state
filename, resources, title = _get_save_args(state, filename, resources, title)
_save_helper(obj, filename, resources, title)
def _get_save_args(state, filename, resources, title):
if filename is None and state.file:
filename = state.file['filename']
if resources is None and state.file:
resources = state.file['resources']
if title is None and state.file:
title = state.file['title']
if filename is None:
raise RuntimeError("save() called but no filename was supplied and output_file(...) was never called, nothing saved")
if resources is None:
warnings.warn("save() called but no resources was supplied and output_file(...) was never called, defaulting to resources.INLINE")
from .resources import INLINE
resources = INLINE
if title is None:
warnings.warn("save() called but no title was supplied and output_file(...) was never called, using default title 'Bokeh Plot'")
title = "Bokeh Plot"
return filename, resources, title
def _save_helper(obj, filename, resources, title):
# TODO: (bev) Widget seems awkward as a base class to check here
if isinstance(obj, Widget):
doc = Document()
doc.add(obj)
elif isinstance(obj, Document):
doc = obj
else:
raise RuntimeError("Unable to save object of type '%s'" % type(obj))
html = file_html(doc, resources, title)
with io.open(filename, "w", encoding="utf-8") as f:
f.write(decode_utf8(html))
def push(session=None, document=None, state=None):
''' Update the server with the data for the current document.
Will fall back to the default output state (or an explicitly provided
:class:`State` object) for ``session`` or ``document`` if they are not
provided.
Args:
session (Session, optional) : a Bokeh server session to push objects to
document (Document, optional) : A :class:`bokeh.document.Document` to use
Returns:
None
'''
if state is None:
state = _state
if not session:
session = state.session
if not document:
document = state.document
if not session:
warnings.warn("push() called but no session was supplied and output_server(...) was never called, nothing pushed")
return
return session.store_document(document)
def reset_output(state=None):
''' Clear the default state of all output modes.
Returns:
None
'''
_state.reset()
def _deduplicate_plots(plot, subplots):
doc = _state.document
doc.context.children = list(set(doc.context.children) - set(subplots))
doc.add(plot)
doc._current_plot = plot # TODO (bev) don't use private attrs
def _push_or_save(obj):
if _state.session and _state.document.autostore:
push()
if _state.file and _state.file['autosave']:
save(obj)
def gridplot(plot_arrangement, **kwargs):
''' Generate a plot that arranges several subplots into a grid.
Args:
plot_arrangement (nested list of Plots) : plots to arrange in a grid
**kwargs: additional attributes to pass in to GridPlot() constructor
.. note:: ``plot_arrangement`` can be nested, e.g [[p1, p2], [p3, p4]]
Returns:
grid_plot: a new :class:`GridPlot <bokeh.models.plots.GridPlot>`
'''
grid = GridPlot(children=plot_arrangement, **kwargs)
subplots = itertools.chain.from_iterable(plot_arrangement)
_deduplicate_plots(grid, subplots)
_push_or_save(grid)
return grid
def hplot(*children, **kwargs):
''' Generate a layout that arranges several subplots horizontally.
'''
layout = HBox(children=list(children), **kwargs)
_deduplicate_plots(layout, children)
_push_or_save(layout)
return layout
def vplot(*children, **kwargs):
''' Generate a layout that arranges several subplots vertically.
'''
layout = VBox(children=list(children), **kwargs)
_deduplicate_plots(layout, children)
_push_or_save(layout)
return layout
def vform(*children, **kwargs):
''' Generate a layout that arranges several subplots vertically.
'''
layout = VBoxForm(children=list(children), **kwargs)
_push_or_save(layout)
return layout
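# Illustrative sketch (not part of the original module): a typical file-output
# session combines these helpers with figures created via bokeh.plotting.
#
#     from bokeh.plotting import figure
#     from bokeh.io import output_file, gridplot, show
#
#     output_file("lines.html", title="example")
#     p1 = figure(); p1.line([1, 2, 3], [4, 6, 5])
#     p2 = figure(); p2.circle([1, 2, 3], [4, 6, 5])
#     show(gridplot([[p1, p2]]))   # opens lines.html in a browser tab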
| bsd-3-clause |
grongor/school_rfid | lib/nmap-6.40/zenmap/radialnet/gui/RadialNet.py | 1 | 67333 | # vim: set encoding=utf-8 :
# ***********************IMPORTANT NMAP LICENSE TERMS************************
# * *
# * The Nmap Security Scanner is (C) 1996-2013 Insecure.Com LLC. Nmap is *
# * also a registered trademark of Insecure.Com LLC. This program is free *
# * software; you may redistribute and/or modify it under the terms of the *
# * GNU General Public License as published by the Free Software *
# * Foundation; Version 2 ("GPL"), BUT ONLY WITH ALL OF THE CLARIFICATIONS *
# * AND EXCEPTIONS DESCRIBED HEREIN. This guarantees your right to use, *
# * modify, and redistribute this software under certain conditions. If *
# * you wish to embed Nmap technology into proprietary software, we sell *
# * alternative licenses (contact [email protected]). Dozens of software *
# * vendors already license Nmap technology such as host discovery, port *
# * scanning, OS detection, version detection, and the Nmap Scripting *
# * Engine. *
# * *
# * Note that the GPL places important restrictions on "derivative works", *
# * yet it does not provide a detailed definition of that term. To avoid *
# * misunderstandings, we interpret that term as broadly as copyright law *
# * allows. For example, we consider an application to constitute a *
# * derivative work for the purpose of this license if it does any of the *
# * following with any software or content covered by this license *
# * ("Covered Software"): *
# * *
# * o Integrates source code from Covered Software. *
# * *
# * o Reads or includes copyrighted data files, such as Nmap's nmap-os-db *
# * or nmap-service-probes. *
# * *
# * o Is designed specifically to execute Covered Software and parse the *
# * results (as opposed to typical shell or execution-menu apps, which will *
# * execute anything you tell them to). *
# * *
# * o Includes Covered Software in a proprietary executable installer. The *
# * installers produced by InstallShield are an example of this. Including *
# * Nmap with other software in compressed or archival form does not *
# * trigger this provision, provided appropriate open source decompression *
# * or de-archiving software is widely available for no charge. For the *
# * purposes of this license, an installer is considered to include Covered *
# * Software even if it actually retrieves a copy of Covered Software from *
# * another source during runtime (such as by downloading it from the *
# * Internet). *
# * *
# * o Links (statically or dynamically) to a library which does any of the *
# * above. *
# * *
# * o Executes a helper program, module, or script to do any of the above. *
# * *
# * This list is not exclusive, but is meant to clarify our interpretation *
# * of derived works with some common examples. Other people may interpret *
# * the plain GPL differently, so we consider this a special exception to *
# * the GPL that we apply to Covered Software. Works which meet any of *
# * these conditions must conform to all of the terms of this license, *
# * particularly including the GPL Section 3 requirements of providing *
# * source code and allowing free redistribution of the work as a whole. *
# * *
# * As another special exception to the GPL terms, Insecure.Com LLC grants *
# * permission to link the code of this program with any version of the *
# * OpenSSL library which is distributed under a license identical to that *
# * listed in the included docs/licenses/OpenSSL.txt file, and distribute *
# * linked combinations including the two. *
# * *
# * Any redistribution of Covered Software, including any derived works, *
# * must obey and carry forward all of the terms of this license, including *
# * obeying all GPL rules and restrictions. For example, source code of *
# * the whole work must be provided and free redistribution must be *
# * allowed. All GPL references to "this License", are to be treated as *
# * including the special and conditions of the license text as well. *
# * *
# * Because this license imposes special exceptions to the GPL, Covered *
# * Work may not be combined (even as part of a larger work) with plain GPL *
# * software. The terms, conditions, and exceptions of this license must *
# * be included as well. This license is incompatible with some other open *
# * source licenses as well. In some cases we can relicense portions of *
# * Nmap or grant special permissions to use it in other open source *
# * software. Please contact [email protected] with any such requests. *
# * Similarly, we don't incorporate incompatible open source software into *
# * Covered Software without special permission from the copyright holders. *
# * *
# * If you have any questions about the licensing restrictions on using *
# * Nmap in other works, we are happy to help. As mentioned above, we also *
# * offer an alternative license to integrate Nmap into proprietary *
# * applications and appliances. These contracts have been sold to dozens *
# * of software vendors, and generally include a perpetual license as well *
# * as providing for priority support and updates. They also fund the *
# * continued development of Nmap. Please email [email protected] for *
# * further information. *
# * *
# * If you received these files with a written license agreement or *
# * contract stating terms other than the terms above, then that *
# * alternative license agreement takes precedence over these comments. *
# * *
# * Source is provided to this software because we believe users have a *
# * right to know exactly what a program is going to do before they run it. *
# * This also allows you to audit the software for security holes (none *
# * have been found so far). *
# * *
# * Source code also allows you to port Nmap to new platforms, fix bugs, *
# * and add new features. You are highly encouraged to send your changes *
# * to the [email protected] mailing list for possible incorporation into the *
# * main distribution. By sending these changes to Fyodor or one of the *
# * Insecure.Org development mailing lists, or checking them into the Nmap *
# * source code repository, it is understood (unless you specify otherwise) *
# * that you are offering the Nmap Project (Insecure.Com LLC) the *
# * unlimited, non-exclusive right to reuse, modify, and relicense the *
# * code. Nmap will always be available Open Source, but this is important *
# * because the inability to relicense code has caused devastating problems *
# * for other Free Software projects (such as KDE and NASM). We also *
# * occasionally relicense the code to third parties as discussed above. *
# * If you wish to specify special license conditions of your *
# * contributions, just say so when you send them. *
# * *
# * This program is distributed in the hope that it will be useful, but *
# * WITHOUT ANY WARRANTY; without even the implied warranty of *
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the Nmap *
# * license file for more details (it's in a COPYING file included with *
# * Nmap, and also available from https://svn.nmap.org/nmap/COPYING *
# * *
# ***************************************************************************/
import gtk
import math
import time
import copy
import cairo
import gobject
import radialnet.util.drawing as drawing
import radialnet.util.geometry as geometry
import radialnet.util.misc as misc
from radialnet.core.Coordinate import PolarCoordinate, CartesianCoordinate
from radialnet.core.Interpolation import Linear2DInterpolator
from radialnet.core.Graph import Graph, Node
from radialnet.gui.NodeWindow import NodeWindow
from radialnet.gui.Image import Icons, get_pixels_for_cairo_image_surface
from zenmapCore.BasePaths import fs_enc
REGION_COLORS = [(1.0, 0.0, 0.0), (1.0, 1.0, 0.0), (0.0, 1.0, 0.0)]
REGION_RED = 0
REGION_YELLOW = 1
REGION_GREEN = 2
SQUARE_TYPES = ['router', 'switch', 'wap']
ICON_DICT = {'router': 'router',
'switch': 'switch',
'wap': 'wireless',
'firewall': 'firewall'}
POINTER_JUMP_TO = 0
POINTER_INFO = 1
POINTER_GROUP = 2
POINTER_FILL = 3
LAYOUT_SYMMETRIC = 0
LAYOUT_WEIGHTED = 1
INTERPOLATION_CARTESIAN = 0
INTERPOLATION_POLAR = 1
FILE_TYPE_PDF = 1
FILE_TYPE_PNG = 2
FILE_TYPE_PS = 3
FILE_TYPE_SVG = 4
class RadialNet(gtk.DrawingArea):
"""
Radial network visualization widget
"""
def __init__(self, layout=LAYOUT_SYMMETRIC):
"""
Constructor method of RadialNet widget class
@type layout: number
@param layout: Initial layout algorithm (LAYOUT_SYMMETRIC or LAYOUT_WEIGHTED)
"""
self.__center_of_widget = (0, 0)
self.__graph = None
self.__number_of_rings = 0
self.__ring_gap = 30
self.__min_ring_gap = 10
self.__layout = layout
self.__interpolation = INTERPOLATION_POLAR
self.__interpolation_slow_in_out = True
self.__animating = False
self.__animation_rate = 1000 / 60 # 60Hz (human perception factor)
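# Note: in Python 2 the division above is integer division, so 1000 / 60
# evaluates to 16; the gobject timeout used in __livens_up therefore fires
# roughly every 16 ms (about 62 frames per second).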
self.__number_of_frames = 60
self.__scale = 1.0
self.__rotate = 225 # rotated so that single-host traceroute doesn't have overlapping hosts
self.__translation = (0, 0)
self.__button1_press = False
self.__button2_press = False
self.__button3_press = False
self.__last_motion_point = None
self.__fisheye = False
self.__fisheye_ring = 0
self.__fisheye_spread = 0.5
self.__fisheye_interest = 2
self.__show_address = True
self.__show_hostname = True
self.__show_icon = True
self.__show_latency = False
self.__show_ring = True
self.__show_region = True
self.__region_color = REGION_RED
self.__node_views = dict()
self.__last_group_node = None
self.__pointer_status = POINTER_JUMP_TO
self.__sorted_nodes = list()
self.__icon = Icons()
super(RadialNet, self).__init__()
self.connect('expose_event', self.expose)
self.connect('button_press_event', self.button_press)
self.connect('button_release_event', self.button_release)
self.connect('motion_notify_event', self.motion_notify)
self.connect('enter_notify_event', self.enter_notify)
self.connect('leave_notify_event', self.leave_notify)
self.connect('key_press_event', self.key_press)
self.connect('key_release_event', self.key_release)
self.connect('scroll_event', self.scroll_event)
self.add_events(gtk.gdk.BUTTON_PRESS_MASK |
gtk.gdk.BUTTON_RELEASE_MASK |
gtk.gdk.ENTER_NOTIFY |
gtk.gdk.LEAVE_NOTIFY |
gtk.gdk.MOTION_NOTIFY |
gtk.gdk.NOTHING |
gtk.gdk.KEY_PRESS_MASK |
gtk.gdk.KEY_RELEASE_MASK |
gtk.gdk.POINTER_MOTION_HINT_MASK |
gtk.gdk.POINTER_MOTION_MASK |
gtk.gdk.SCROLL_MASK)
self.set_flags(gtk.CAN_FOCUS)
self.grab_focus()
def graph_is_not_empty(function):
"""
Decorator function to prevent the execution when the graph is not set
@type function: function
@param function: Protected function
"""
def check_graph_status(*args):
if args[0].__graph == None:
return False
return function(*args)
return check_graph_status
def not_is_in_animation(function):
"""
Decorator function to prevent the execution when graph is animating
@type function: function
@param function: Protected function
"""
def check_animation_status(*args):
if args[0].__animating == True:
return False
return function(*args)
return check_animation_status
def save_drawing_to_file(self, file, type=FILE_TYPE_PNG):
"""
"""
allocation = self.get_allocation()
if type == FILE_TYPE_PDF:
self.surface = cairo.PDFSurface(file,
allocation.width,
allocation.height)
elif type == FILE_TYPE_PNG:
self.surface = cairo.ImageSurface(cairo.FORMAT_ARGB32,
allocation.width,
allocation.height)
elif type == FILE_TYPE_PS:
self.surface = cairo.PSSurface(file,
allocation.width,
allocation.height)
elif type == FILE_TYPE_SVG:
self.surface = cairo.SVGSurface(file,
allocation.width,
allocation.height)
else:
raise TypeError, 'unknown surface type'
context = cairo.Context(self.surface)
context.rectangle(0, 0, allocation.width, allocation.height)
context.set_source_rgb(1.0, 1.0, 1.0)
context.fill()
self.__draw(context)
if type == FILE_TYPE_PNG:
# write_to_png requires a str, not unicode, in py2cairo 1.8.10 and
# earlier.
self.surface.write_to_png(fs_enc(file))
self.surface.flush()
self.surface.finish()
return True
def get_slow_inout(self):
"""
"""
return self.__interpolation_slow_in_out
def set_slow_inout(self, value):
"""
"""
self.__interpolation_slow_in_out = value
def get_region_color(self):
"""
"""
return self.__region_color
def set_region_color(self, value):
"""
"""
self.__region_color = value
def get_show_region(self):
"""
"""
return self.__show_region
def set_show_region(self, value):
"""
"""
self.__show_region = value
self.queue_draw()
def get_pointer_status(self):
"""
"""
return self.__pointer_status
def set_pointer_status(self, pointer_status):
"""
"""
self.__pointer_status = pointer_status
def get_show_address(self):
"""
"""
return self.__show_address
def get_show_hostname(self):
"""
"""
return self.__show_hostname
def get_show_ring(self):
"""
"""
return self.__show_ring
def set_show_address(self, value):
"""
"""
self.__show_address = value
self.queue_draw()
def set_show_hostname(self, value):
"""
"""
self.__show_hostname = value
self.queue_draw()
def set_show_ring(self, value):
"""
"""
self.__show_ring = value
self.queue_draw()
def get_min_ring_gap(self):
"""
"""
return self.__min_ring_gap
@graph_is_not_empty
@not_is_in_animation
def set_min_ring_gap(self, value):
"""
"""
self.__min_ring_gap = int(value)
if self.__ring_gap < self.__min_ring_gap:
self.__ring_gap = self.__min_ring_gap
self.__update_nodes_positions()
self.queue_draw()
return True
def get_number_of_frames(self):
"""
"""
return self.__number_of_frames
@not_is_in_animation
def set_number_of_frames(self, number_of_frames):
"""
"""
if number_of_frames > 2:
self.__number_of_frames = int(number_of_frames)
return True
self.__number_of_frames = 3
return False
@not_is_in_animation
def update_layout(self):
"""
"""
if self.__graph is None:
return
self.__animating = True
self.__calc_interpolation(self.__graph.get_main_node())
self.__livens_up()
@not_is_in_animation
def set_layout(self, layout):
"""
"""
if self.__layout != layout:
self.__layout = layout
if self.__graph != None:
self.__animating = True
self.__calc_interpolation(self.__graph.get_main_node())
self.__livens_up()
return True
return False
def get_layout(self):
"""
"""
return self.__layout
@not_is_in_animation
def set_interpolation(self, interpolation):
"""
"""
self.__interpolation = interpolation
return True
def get_interpolation(self):
"""
"""
return self.__interpolation
def get_number_of_rings(self):
"""
"""
return self.__number_of_rings
def get_fisheye_ring(self):
"""
"""
return self.__fisheye_ring
def get_fisheye_interest(self):
"""
"""
return self.__fisheye_interest
def get_fisheye_spread(self):
"""
"""
return self.__fisheye_spread
def get_fisheye(self):
"""
"""
return self.__fisheye
def set_fisheye(self, enable):
"""
"""
self.__fisheye = enable
self.__update_nodes_positions()
self.queue_draw()
def set_fisheye_ring(self, value):
"""
"""
self.__fisheye_ring = value
self.__check_fisheye_ring()
self.__update_nodes_positions()
self.queue_draw()
def set_fisheye_interest(self, value):
"""
"""
self.__fisheye_interest = value
self.__update_nodes_positions()
self.queue_draw()
def set_fisheye_spread(self, value):
"""
"""
self.__fisheye_spread = value
self.__update_nodes_positions()
self.queue_draw()
def get_show_icon(self):
"""
"""
return self.__show_icon
def set_show_icon(self, value):
"""
"""
self.__show_icon = value
self.queue_draw()
def get_show_latency(self):
"""
"""
return self.__show_latency
def set_show_latency(self, value):
"""
"""
self.__show_latency = value
self.queue_draw()
def get_scale(self):
"""
"""
return self.__scale
def get_zoom(self):
"""
"""
return int(round(self.__scale * 100))
def set_scale(self, scale):
"""
"""
if scale >= 0.01:
self.__scale = scale
self.queue_draw()
def set_zoom(self, zoom):
"""
"""
if float(zoom) >= 1:
self.set_scale( float(zoom) / 100.0 )
self.queue_draw()
def get_ring_gap(self):
"""
"""
return self.__ring_gap
@not_is_in_animation
def set_ring_gap(self, ring_gap):
"""
"""
if ring_gap >= self.__min_ring_gap:
self.__ring_gap = ring_gap
self.__update_nodes_positions()
self.queue_draw()
def scroll_event(self, widget, event):
"""
"""
if event.direction == gtk.gdk.SCROLL_UP:
self.set_scale(self.__scale + 0.01)
if event.direction == gtk.gdk.SCROLL_DOWN:
self.set_scale(self.__scale - 0.01)
self.queue_draw()
@graph_is_not_empty
@not_is_in_animation
def key_press(self, widget, event):
"""
"""
key = gtk.gdk.keyval_name(event.keyval)
if key == 'KP_Add':
self.set_ring_gap(self.__ring_gap + 1)
elif key == 'KP_Subtract':
self.set_ring_gap(self.__ring_gap - 1)
elif key == 'Page_Up':
self.set_scale(self.__scale + 0.01)
elif key == 'Page_Down':
self.set_scale(self.__scale - 0.01)
self.queue_draw()
return True
@graph_is_not_empty
def key_release(self, widget, event):
"""
"""
key = gtk.gdk.keyval_name(event.keyval)
if key == 'c':
self.__translation = (0, 0)
elif key == 'r':
self.__show_ring = not self.__show_ring
elif key == 'a':
self.__show_address = not self.__show_address
elif key == 'h':
self.__show_hostname = not self.__show_hostname
elif key == 'i':
self.__show_icon = not self.__show_icon
elif key == 'l':
self.__show_latency = not self.__show_latency
self.queue_draw()
return True
@graph_is_not_empty
@not_is_in_animation
def enter_notify(self, widget, event):
"""
"""
self.grab_focus()
return False
@graph_is_not_empty
@not_is_in_animation
def leave_notify(self, widget, event):
"""
"""
for node in self.__graph.get_nodes():
node.set_draw_info({'over':False})
self.queue_draw()
return False
@graph_is_not_empty
def button_press(self, widget, event):
"""
Button press event callback
@type widget: GtkWidget
@param widget: Gtk widget superclass
@type event: GtkEvent
@param event: Gtk event of widget
@rtype: boolean
@return: Indicator of the event propagation
"""
result = self.__get_node_by_coordinate(self.get_pointer())
if event.button == 1: self.__button1_press = True
# animate if node is pressed
if self.__pointer_status == POINTER_JUMP_TO and event.button == 1:
# prevent double animation
if self.__animating == True: return False
if result != None:
node, point = result
main_node = self.__graph.get_main_node()
if node != main_node:
if node.get_draw_info('group') == True:
node.set_draw_info({'group':False})
node.set_subtree_info({'grouped':False,
'group_node':None})
self.__animating = True
self.__calc_interpolation(node)
self.__livens_up()
# group node if it's pressed
elif self.__pointer_status == POINTER_GROUP and event.button == 1:
# prevent group on animation
if self.__animating == True: return False
if result != None:
node, point = result
main_node = self.__graph.get_main_node()
if node != main_node:
if node.get_draw_info('group') == True:
node.set_draw_info({'group':False})
node.set_subtree_info({'grouped':False,
'group_node':None})
else:
self.__last_group_node = node
node.set_draw_info({'group':True})
node.set_subtree_info({'grouped':True,
'group_node':node})
self.__animating = True
self.__calc_interpolation(self.__graph.get_main_node())
self.__livens_up()
# setting to show node's region
elif self.__pointer_status == POINTER_FILL and event.button == 1:
if result != None:
node, point = result
if node.get_draw_info('region') == self.__region_color:
node.set_draw_info({'region': None})
else:
node.set_draw_info({'region': self.__region_color})
self.queue_draw()
# show node details
elif event.button == 3 or self.__pointer_status == POINTER_INFO:
if event.button == 3:
self.__button3_press = True
if result != None:
xw, yw = self.window.get_origin()
node, point = result
x, y = point
if node in self.__node_views.keys():
self.__node_views[node].present()
elif node.get_draw_info('scanned'):
view = NodeWindow(node, (int(xw + x), int(yw + y)))
def close_view(view, event, node):
view.destroy()
del self.__node_views[node]
view.connect("delete-event", close_view, node)
view.show_all()
self.__node_views[node] = view
return False
@graph_is_not_empty
def button_release(self, widget, event):
"""
Button release event callback
@type widget: GtkWidget
@param widget: Gtk widget superclass
@type event: GtkEvent
@param event: Gtk event of widget
@rtype: boolean
@return: Indicator of the event propagation
"""
if event.button == 1:
self.__button1_press = False
if event.button == 2:
self.__button2_press = False
if event.button == 3:
self.__button3_press = False
self.grab_focus()
return False
@graph_is_not_empty
def motion_notify(self, widget, event):
"""
Pointer motion event callback
@type widget: GtkWidget
@param widget: Gtk widget superclass
@type event: GtkEvent
@param event: Gtk event of widget
@rtype: boolean
@return: Indicator of the event propagation
"""
xc, yc = self.__center_of_widget
pointer = self.get_pointer()
for node in self.__graph.get_nodes():
node.set_draw_info({'over':False})
result = self.__get_node_by_coordinate(self.get_pointer())
if result != None:
result[0].set_draw_info({'over':True})
elif self.__button1_press == True and self.__last_motion_point != None:
ax, ay = pointer
ox, oy = self.__last_motion_point
tx, ty = self.__translation
self.__translation = (tx + ax - ox, ty - ay + oy)
self.__last_motion_point = pointer
self.grab_focus()
self.queue_draw()
return False
def expose(self, widget, event):
"""
Drawing callback
@type widget: GtkWidget
@param widget: Gtk widget superclass
@type event: GtkEvent
@param event: Gtk event of widget
@rtype: boolean
@return: Indicator of the event propagation
"""
allocation = self.get_allocation()
context = widget.window.cairo_create()
context.rectangle(*event.area)
context.set_source_rgb(1.0, 1.0, 1.0)
context.fill()
self.__draw(context)
return False
@graph_is_not_empty
def __draw(self, context):
"""
Drawing method
"""
# getting allocation reference
allocation = self.get_allocation()
self.__center_of_widget = (allocation.width / 2,
allocation.height / 2)
aw, ah = allocation.width, allocation.height
xc, yc = self.__center_of_widget
ax, ay = self.__translation
# xc = 320 yc = 240
# -1.5 | -0.5 ( 480, 360)
# -1.0 | 0.0 ( 320, 240)
# -0.5 | 0.5 ( 160, 120)
# 0.0 | 1.0 ( 0, 0)
# 0.5 | 1.5 (-160, -120)
# 1.0 | 2.0 (-320, -240)
# 1.5 | 2.5 (-480, -360)
# scaling and translate
factor = -(self.__scale - 1)
context.translate(xc * factor + ax, yc * factor - ay)
if self.__scale != 1.0:
context.scale(self.__scale, self.__scale)
# drawing over node's region
if self.__show_region and not self.__animating:
for node in self.__sorted_nodes:
not_grouped = not node.get_draw_info('grouped')
if node.get_draw_info('region') != None and not_grouped:
x, y = node.get_cartesian_coordinate()
xc, yc = self.__center_of_widget
r, g, b = REGION_COLORS[node.get_draw_info('region')]
start, final = node.get_draw_info('range')
i_radius = node.get_coordinate_radius()
f_radius = self.__calc_radius(self.__number_of_rings - 1)
is_fill_all = abs(final - start) == 360
final = math.radians(final + self.__rotate)
start = math.radians(start + self.__rotate)
context.move_to(xc, yc)
context.set_source_rgba(r, g, b, 0.1)
context.new_path()
context.arc(xc, yc, i_radius, -final, -start)
context.arc_negative(xc, yc, f_radius, -start, -final)
context.close_path()
context.fill()
context.stroke()
if not is_fill_all:
context.set_source_rgb(r, g, b)
context.set_line_width(1)
xa, ya = PolarCoordinate(i_radius, final).to_cartesian()
xb, yb = PolarCoordinate(f_radius, final).to_cartesian()
context.move_to(xc + xa, yc - ya)
context.line_to(xc + xb, yc - yb)
context.stroke()
xa, ya = PolarCoordinate(i_radius, start).to_cartesian()
xb, yb = PolarCoordinate(f_radius, start).to_cartesian()
context.move_to(xc + xa, yc - ya)
context.line_to(xc + xb, yc - yb)
context.stroke()
# drawing network rings
if self.__show_ring == True and self.__animating != True:
for i in range(1, self.__number_of_rings):
radius = self.__calc_radius(i)
context.arc(xc, yc, radius, 0, 2 * math.pi)
context.set_source_rgb(0.8, 0.8, 0.8)
context.set_line_width(1)
context.stroke()
# drawing nodes and their connections
for edge in self.__graph.get_edges():
# check group constraints for edges
a, b = edge.get_nodes()
a_is_grouped = a.get_draw_info('grouped')
b_is_grouped = b.get_draw_info('grouped')
a_is_group = a.get_draw_info('group')
b_is_group = b.get_draw_info('group')
a_group = a.get_draw_info('group_node')
b_group = b.get_draw_info('group_node')
a_is_child = a in b.get_draw_info('children')
b_is_child = b in a.get_draw_info('children')
last_group = self.__last_group_node
groups = [a_group, b_group]
if last_group in groups and last_group != None:
self.__draw_edge(context, edge)
elif not a_is_grouped or not b_is_grouped:
if not (a_is_group and b_is_child or b_is_group and a_is_child):
self.__draw_edge(context, edge)
elif a_group != b_group:
self.__draw_edge(context, edge)
for node in reversed(self.__sorted_nodes):
# check group constraints for nodes
group = node.get_draw_info('group_node')
grouped = node.get_draw_info('grouped')
if group == self.__last_group_node or not grouped:
self.__draw_node(context, node)
def __draw_edge(self, context, edge):
"""
Draw the connection between two nodes
@type edge: Edge
@param edge: The edge (connection between two nodes) to be drawn
"""
a, b = edge.get_nodes()
xa, ya = a.get_cartesian_coordinate()
xb, yb = b.get_cartesian_coordinate()
xc, yc = self.__center_of_widget
a_children = a.get_draw_info('children')
b_children = b.get_draw_info('children')
latency = edge.get_weights_mean()
# check if it isn't a hierarchy connection
if a not in b_children and b not in a_children:
context.set_source_rgba(1.0, 0.6, 0.1, 0.8)
elif a.get_draw_info('no_route') or b.get_draw_info('no_route'):
context.set_source_rgba(0.0, 0.0, 0.0, 0.8)
else:
context.set_source_rgba(0.1, 0.5, 1.0, 0.8)
# calculating line thickness by latency
if latency != None:
min = self.__graph.get_min_edge_mean_weight()
max = self.__graph.get_max_edge_mean_weight()
if max != min:
thickness = (latency - min) * 4 / (max - min) + 1
else:
thickness = 1
context.set_line_width(thickness)
else:
context.set_dash([2, 2])
context.set_line_width(1)
context.move_to(xc + xa, yc - ya)
context.line_to(xc + xb, yc - yb)
context.stroke()
context.set_dash([1, 0])
if not self.__animating and self.__show_latency:
if latency != None:
context.set_font_size(8)
context.set_line_width(1)
context.move_to(xc + (xa + xb) / 2 + 1,
yc - (ya + yb) / 2 + 4)
context.show_text(str(round(latency, 2)))
context.stroke()
def __draw_node(self, context, node):
"""
Draw a node and its information
@type node: NetNode
@param node: The node to be drawn
"""
x, y = node.get_cartesian_coordinate()
xc, yc = self.__center_of_widget
r, g, b = node.get_draw_info('color')
radius = node.get_draw_info('radius')
type = node.get_info('device_type')
x_gap = radius + 2
y_gap = 0
# draw group indication
if node.get_draw_info('group') == True:
x_gap += 5
if type in SQUARE_TYPES:
context.rectangle(xc + x - radius - 5,
yc - y - radius - 5,
2 * radius + 10,
2 * radius + 10)
else:
context.arc(xc + x, yc - y, radius + 5, 0, 2 * math.pi)
context.set_source_rgb(1.0, 1.0, 1.0)
context.fill_preserve()
if node.deep_search_child(self.__graph.get_main_node()):
context.set_source_rgb(0.0, 0.0, 0.0)
else:
context.set_source_rgb(0.1, 0.5, 1.0)
context.set_line_width(2)
context.stroke()
# draw over node
if node.get_draw_info('over') == True:
context.set_line_width(0)
if type in SQUARE_TYPES:
context.rectangle(xc + x - radius - 5,
yc - y - radius - 5,
2 * radius + 10,
2 * radius + 10)
else:
context.arc(xc + x, yc - y, radius + 5, 0, 2 * math.pi)
context.set_source_rgb(0.1, 0.5, 1.0)
context.fill_preserve()
context.stroke()
# draw node
if type in SQUARE_TYPES:
context.rectangle(xc + x - radius,
yc - y - radius,
2 * radius,
2 * radius)
else:
context.arc(xc + x, yc - y, radius, 0, 2 * math.pi)
# draw icons
if not self.__animating and self.__show_icon:
icons = list()
if type in ICON_DICT.keys():
icons.append(self.__icon.get_pixbuf(ICON_DICT[type]))
if node.get_info('filtered'):
icons.append(self.__icon.get_pixbuf('padlock'))
for icon in icons:
stride, data = get_pixels_for_cairo_image_surface(icon)
# Cairo documentation says that the correct way to obtain a
# legal stride value is using the function
# cairo.ImageSurface.format_stride_for_width().
# But this method is only available since cairo 1.6. So we are
# using the stride returned by
# get_pixels_for_cairo_image_surface() function.
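# A rough sketch of the cairo >= 1.6 alternative mentioned above, assuming
# that pycairo version were available:
#   stride = cairo.ImageSurface.format_stride_for_width(
#       cairo.FORMAT_ARGB32, icon.get_width())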
surface = cairo.ImageSurface.create_for_data(data,
cairo.FORMAT_ARGB32,
icon.get_width(),
icon.get_height(),
stride)
context.set_source_surface(surface,
round(xc + x + x_gap),
round(yc - y + y_gap - 6))
context.paint()
x_gap += 13
# draw node text
context.set_source_rgb(r, g, b)
context.fill_preserve()
if node.get_draw_info('valid'):
context.set_source_rgb(0.0, 0.0, 0.0)
else:
context.set_source_rgb(0.1, 0.5, 1.0)
if not self.__animating and self.__show_address:
context.set_font_size(8)
context.move_to(round(xc + x + x_gap),
round(yc - y + y_gap + 4))
hostname = node.get_info('hostname')
if hostname != None and self.__show_hostname:
context.show_text(hostname)
elif node.get_info('ip') != None:
context.show_text(node.get_info('ip'))
context.set_line_width(1)
context.stroke()
def __check_fisheye_ring(self):
"""
"""
if self.__fisheye_ring >= self.__number_of_rings:
self.__fisheye_ring = self.__number_of_rings - 1
def __set_number_of_rings(self, value):
"""
"""
self.__number_of_rings = value
self.__check_fisheye_ring()
def __fisheye_function(self, ring):
"""
"""
distance = abs(self.__fisheye_ring - ring)
level_of_detail = self.__ring_gap * self.__fisheye_interest
spreaded_distance = distance - distance * self.__fisheye_spread
value = level_of_detail / (spreaded_distance + 1)
if value < self.__min_ring_gap:
value = self.__min_ring_gap
return value
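# Illustrative values, assuming the defaults set in __init__ (ring_gap=30,
# fisheye_interest=2, fisheye_spread=0.5, min_ring_gap=10) and a hypothetical
# fisheye_ring of 3: ring 3 -> 60, ring 4 -> 40, ring 5 -> 30, ring 7 -> 20;
# any result smaller than min_ring_gap is clamped to 10.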
@graph_is_not_empty
@not_is_in_animation
def __update_nodes_positions(self):
"""
"""
for node in self.__sorted_nodes:
if node.get_draw_info('grouped') == True:
# deep group check
group = node.get_draw_info('group_node')
while group.get_draw_info('group_node') != None:
group = group.get_draw_info('group_node')
ring = group.get_draw_info('ring')
node.set_coordinate_radius(self.__calc_radius(ring))
else:
ring = node.get_draw_info('ring')
node.set_coordinate_radius(self.__calc_radius(ring))
@graph_is_not_empty
def __get_node_by_coordinate(self, point):
"""
"""
xc, yc = self.__center_of_widget
for node in self.__graph.get_nodes():
if node.get_draw_info('grouped') == True:
continue
ax, ay = self.__translation
xn, yn = node.get_cartesian_coordinate()
center = (xc + xn * self.__scale + ax, yc - yn * self.__scale - ay)
radius = node.get_draw_info('radius') * self.__scale
type = node.get_info('device_type')
if type in SQUARE_TYPES:
if geometry.is_in_square(point, radius, center) == True:
return node, center
else:
if geometry.is_in_circle(point, radius, center) == True:
return node, center
return None
def __calc_radius(self, ring):
"""
"""
if self.__fisheye:
radius = 0
while ring > 0:
radius += self.__fisheye_function(ring)
ring -= 1
else:
radius = ring * self.__ring_gap
return radius
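# Without fisheye this is plain linear spacing: with the default ring_gap of
# 30, rings 0, 1, 2, 3 map to radii 0, 30, 60, 90. With fisheye enabled the
# per-ring gaps from __fisheye_function are accumulated instead.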
@graph_is_not_empty
def __arrange_nodes(self):
"""
"""
new_nodes = set([self.__graph.get_main_node()])
old_nodes = set()
number_of_needed_rings = 1
ring = 0
# while new nodes were found
while len(new_nodes) > 0:
tmp_nodes = set()
# for each new node
for node in new_nodes:
old_nodes.add(node)
# set ring location
node.set_draw_info({'ring':ring})
# check group constraints
if node.get_draw_info('group') or node.get_draw_info('grouped'):
children = node.get_draw_info('children')
else:
# getting connections and fixing multiple fathers
children = set()
for child in self.__graph.get_node_connections(node):
if child in old_nodes or child in new_nodes:
continue
if child.get_draw_info('grouped'):
continue
children.add(child)
# set the father reference on each child
for child in children:
child.set_draw_info({'father':node})
node.set_draw_info({'children':misc.sort_children(children, node)})
tmp_nodes.update(children)
# check group influence in number of rings
for node in tmp_nodes:
if node.get_draw_info('grouped') != True:
number_of_needed_rings += 1
break
# update new nodes set
new_nodes.update(tmp_nodes)
new_nodes.difference_update(old_nodes)
ring += 1
self.__set_number_of_rings(number_of_needed_rings)
def __weighted_layout(self):
"""
"""
# calculating the space needed by each node
self.__graph.get_main_node().set_draw_info({'range':(0, 360)})
new_nodes = set([self.__graph.get_main_node()])
self.__graph.get_main_node().calc_needed_space()
while len(new_nodes) > 0:
node = new_nodes.pop()
# add only non-grouped nodes
children = set()
for child in node.get_draw_info('children'):
if child.get_draw_info('grouped') != True:
children.add(child)
new_nodes.add(child)
if len(children) > 0:
min, max = node.get_draw_info('range')
node_total = max - min
children_need = node.get_draw_info('children_need')
for child in children:
child_need = child.get_draw_info('space_need')
child_total = node_total * child_need / children_need
theta = child_total / 2 + min + self.__rotate
child.set_coordinate_theta(theta)
child.set_draw_info({'range':(min, min + child_total)})
min += child_total
def __symmetric_layout(self):
"""
"""
self.__graph.get_main_node().set_draw_info({'range':(0, 360)})
new_nodes = set([self.__graph.get_main_node()])
while len(new_nodes) > 0:
node = new_nodes.pop()
# add only non-grouped nodes
children = set()
for child in node.get_draw_info('children'):
if child.get_draw_info('grouped') != True:
children.add(child)
new_nodes.add(child)
if len(children) > 0:
min, max = node.get_draw_info('range')
factor = float(max - min) / len(children)
for child in children:
theta = factor / 2 + min + self.__rotate
child.set_coordinate_theta(theta)
child.set_draw_info({'range':(min, min + factor)})
min += factor
@graph_is_not_empty
def __calc_layout(self, reference):
"""
"""
# selecting layout algorithm
if self.__layout == LAYOUT_SYMMETRIC:
self.__symmetric_layout()
elif self.__layout == LAYOUT_WEIGHTED:
self.__weighted_layout()
# rotating focus' children to keep orientation
if reference != None:
father, angle = reference
theta = father.get_coordinate_theta()
factor = theta - angle
for node in self.__graph.get_nodes():
theta = node.get_coordinate_theta()
node.set_coordinate_theta(theta - factor)
a, b = node.get_draw_info('range')
node.set_draw_info({'range':(a - factor, b - factor)})
@graph_is_not_empty
def __calc_node_positions(self, reference=None):
"""
"""
# set nodes' hierarchy
self.__arrange_nodes()
self.calc_sorted_nodes()
# set nodes' coordinate radius
for node in self.__graph.get_nodes():
ring = node.get_draw_info('ring')
node.set_coordinate_radius(self.__calc_radius(ring))
# set nodes' coordinate theta
self.__calc_layout(reference)
def __calc_interpolation(self, focus):
"""
"""
old_main_node = self.__graph.get_main_node()
self.__graph.set_main_node(focus)
# getting initial coordinates
for node in self.__graph.get_nodes():
if self.__interpolation == INTERPOLATION_POLAR:
coordinate = node.get_polar_coordinate()
elif self.__interpolation == INTERPOLATION_CARTESIAN:
coordinate = node.get_cartesian_coordinate()
node.set_draw_info({'start_coordinate':coordinate})
father = focus.get_draw_info('father')
# calculate nodes positions (and father orientation)?
if father != None:
xa, ya = father.get_cartesian_coordinate()
xb, yb = focus.get_cartesian_coordinate()
angle = math.atan2(yb - ya, xb - xa)
angle = math.degrees(angle)
self.__calc_node_positions((father, 180 + angle))
else:
self.__calc_node_positions()
# steps for slow-in/slow-out animation
steps = range(self.__number_of_frames)
for i in range(len(steps) / 2):
steps[self.__number_of_frames - 1 - i] = steps[i]
# normalize angles and calculate interpolated points
for node in self.__sorted_nodes:
l2di = Linear2DInterpolator()
# change grouped nodes coordinate
if node.get_draw_info('grouped') == True:
group_node = node.get_draw_info('group_node')
a, b = group_node.get_draw_info('final_coordinate')
if self.__interpolation == INTERPOLATION_POLAR:
node.set_polar_coordinate(a, b)
elif self.__interpolation == INTERPOLATION_CARTESIAN:
node.set_cartesian_coordinate(a, b)
# change interpolation method
if self.__interpolation == INTERPOLATION_POLAR:
coordinate = node.get_polar_coordinate()
node.set_draw_info({'final_coordinate':coordinate})
# adjusting polar coordinates
ri, ti = node.get_draw_info('start_coordinate')
rf, tf = node.get_draw_info('final_coordinate')
# normalization [0, 360]
ti = geometry.normalize_angle(ti)
tf = geometry.normalize_angle(tf)
# against longest path
ti, tf = geometry.calculate_short_path(ti, tf)
# main node goes direct to center (no arc)
if node == self.__graph.get_main_node(): tf = ti
# old main node goes direct to new position (no arc)
if node == old_main_node: ti = tf
node.set_draw_info({'start_coordinate':(ri, ti)})
node.set_draw_info({'final_coordinate':(rf, tf)})
elif self.__interpolation == INTERPOLATION_CARTESIAN:
coordinate = node.get_cartesian_coordinate()
node.set_draw_info({'final_coordinate':coordinate})
# calculate interpolated points
ai, bi = node.get_draw_info('start_coordinate')
af, bf = node.get_draw_info('final_coordinate')
l2di.set_start_point(ai, bi)
l2di.set_final_point(af, bf)
if self.__interpolation_slow_in_out:
points = l2di.get_weighed_points(self.__number_of_frames, steps)
else:
points = l2di.get_points(self.__number_of_frames)
node.set_draw_info({'interpolated_coordinate':points})
return True
def __livens_up(self, index=0):
"""
"""
if self.__graph is None:
# Bail out if the graph became empty during an animation.
self.__last_group_node = None
self.__animating = False
return False
# prepare interpolated points
if index == 0:
# prevent unnecessary animation
no_need_to_move = True
for node in self.__graph.get_nodes():
ai, bi = node.get_draw_info('start_coordinate')
af, bf = node.get_draw_info('final_coordinate')
start_c = round(ai), round(bi)
final_c = round(af), round(bf)
if start_c != final_c:
no_need_to_move = False
if no_need_to_move:
self.__animating = False
return False
# move all nodes for pass 'index'
for node in self.__graph.get_nodes():
a, b = node.get_draw_info('interpolated_coordinate')[index]
if self.__interpolation == INTERPOLATION_POLAR:
node.set_polar_coordinate(a, b)
elif self.__interpolation == INTERPOLATION_CARTESIAN:
node.set_cartesian_coordinate(a, b)
self.queue_draw()
# animation continue condition
if index < self.__number_of_frames - 1:
gobject.timeout_add(self.__animation_rate, # time to recall
self.__livens_up, # recursive call
index + 1) # next iteration
else:
self.__last_group_node = None
self.__animating = False
return False
@not_is_in_animation
def set_graph(self, graph):
"""
Set graph to be displayed in layout
@type graph: Graph
@param graph: The graph to be displayed
"""
if graph.get_number_of_nodes() > 0:
self.__graph = graph
self.__calc_node_positions()
self.queue_draw()
else:
self.__graph = None
def get_scanned_nodes(self):
"""
"""
nodes = list()
if self.__graph is None:
return nodes
for node in self.__graph.get_nodes():
if node.get_draw_info('scanned'):
nodes.append(node)
return nodes
def get_graph(self):
"""
"""
return self.__graph
def set_empty(self):
"""
"""
del(self.__graph)
self.__graph = None
self.queue_draw()
def get_rotation(self):
"""
"""
return self.__rotate
@graph_is_not_empty
def set_rotation(self, angle):
"""
"""
delta = angle - self.__rotate
self.__rotate = angle
for node in self.__graph.get_nodes():
theta = node.get_coordinate_theta()
node.set_coordinate_theta(theta + delta)
self.queue_draw()
def get_translation(self):
"""
"""
return self.__translation
@graph_is_not_empty
def set_translation(self, translation):
"""
"""
self.__translation = translation
self.queue_draw()
def is_empty(self):
"""
"""
if self.__graph == None:
return True
return False
def is_in_animation(self):
"""
"""
return self.__animating
def calc_sorted_nodes(self):
"""
"""
self.__sorted_nodes = list(self.__graph.get_nodes())
self.__sorted_nodes.sort(key = lambda n: n.get_draw_info('ring'))
class NetNode(Node):
"""
Node class for radial network widget
"""
def __init__(self):
"""
"""
self.__draw_info = dict()
"""Hash with draw information"""
self.__coordinate = PolarCoordinate()
super(NetNode, self).__init__()
def get_host(self):
"""
Get the HostInfo that this node represents
"""
return self.get_data()
def set_host(self, host):
"""
Set the HostInfo that this node represents
"""
self.set_data(host)
def get_info(self, info):
"""Return various information extracted from the host set with
set_host."""
host = self.get_data()
if host is not None:
if info == "number_of_open_ports":
return host.get_port_count_by_states(["open"])
elif info == "vulnerability_score":
num_open_ports = host.get_port_count_by_states(["open"])
if num_open_ports < 3:
return 0
elif num_open_ports < 7:
return 1
else:
return 2
elif info == "addresses":
addresses = []
if host.ip is not None:
addresses.append(host.ip)
if host.ipv6 is not None:
addresses.append(host.ipv6)
if host.mac is not None:
addresses.append(host.mac)
return addresses
elif info == "ip":
for addr in (host.ip, host.ipv6, host.mac):
if addr:
return addr.get("addr")
elif info == "hostnames":
hostnames = []
for hostname in host.hostnames:
copy = {}
copy["name"] = hostname.get("hostname", "")
copy["type"] = hostname.get("hostname_type", "")
hostnames.append(copy)
return hostnames
elif info == "hostname":
return host.get_hostname()
elif info == "uptime":
if host.uptime.get("seconds") or host.uptime.get("lastboot"):
return host.uptime
elif info == "device_type":
osmatch = host.get_best_osmatch()
if osmatch is None:
return None
osclasses = osmatch['osclasses']
if len(osclasses) == 0:
return None
types = ["router", "wap", "switch", "firewall"]
for type in types:
if type in osclasses[0].get("type", "").lower():
return type
elif info == "os":
os = {}
# osmatches
if len(host.osmatches) > 0 and \
host.osmatches[0]["accuracy"] != "" and \
host.osmatches[0]["name"] != "":
if os == None:
os = {}
os["matches"] = host.osmatches
os["matches"][0]["db_line"] = 0 # not supported
os_classes = []
for osclass in host.osmatches[0]["osclasses"]:
os_class = {}
os_class["type"] = osclass.get("type", "")
os_class["vendor"] = osclass.get("vendor", "")
#os_class["accuracy"] = int(osclass.get("accuracy", ""))
os_class["accuracy"] = osclass.get("accuracy", "")
os_class["os_family"] = osclass.get("osfamily", "")
os_class["os_gen"] = osclass.get("osgen", "")
os_classes.append(os_class)
os["classes"] = os_classes
# ports_used
if len(host.ports_used) > 0:
if os == None:
os = {}
os_portsused = []
for portused in host.ports_used:
os_portused = {}
os_portused["state"] = portused.get("state", "")
os_portused["protocol"] = portused.get("proto", "")
os_portused["id"] = int(portused.get("portid", "0"))
os_portsused.append(os_portused)
os["used_ports"] = os_portsused
if len(os) > 0:
os["fingerprint"] = ""
return os
elif info == "sequences":
# getting sequences information
sequences = {}
# If all fields are empty, we don't put it into the sequences list
if reduce(lambda x,y: x + y, host.tcpsequence.values(), "") != "":
tcp = {}
if host.tcpsequence.get("index", "") != "":
tcp["index"] = int(host.tcpsequence["index"])
else:
tcp["index"] = 0
tcp["class"] = "" # not supported
tcp["values"] = host.tcpsequence.get("values", "").split(",")
tcp["difficulty"] = host.tcpsequence.get("difficulty", "")
sequences["tcp"] = tcp
if reduce(lambda x,y: x + y, host.ipidsequence.values(), "") != "":
ip_id = {}
ip_id["class"] = host.ipidsequence.get("class", "")
ip_id["values"] = host.ipidsequence.get("values", "").split(",")
sequences["ip_id"] = ip_id
if reduce(lambda x,y: x + y, host.tcptssequence.values(), "") != "":
tcp_ts = {}
tcp_ts["class"] = host.tcptssequence.get("class", "")
tcp_ts["values"] = host.tcptssequence.get("values", "").split(",")
sequences["tcp_ts"] = tcp_ts
return sequences
elif info == "filtered":
if len(host.extraports) > 0 and host.extraports[0]["state"] == "filtered":
return True
else:
for port in host.ports:
if port["port_state"] == "filtered":
return True
break
return False
elif info == "ports":
ports = list()
for host_port in host.ports:
port = dict()
state = dict()
service = dict()
port["id"] = int(host_port.get("portid", ""))
port["protocol"] = host_port.get("protocol", "")
state["state"] = host_port.get("port_state", "")
state["reason"] = "" # not supported
state["reason_ttl"] = "" # not supported
state["reason_ip"] = "" # not supported
service["name"] = host_port.get("service_name", "")
service["conf"] = host_port.get("service_conf", "")
service["method"] = host_port.get("service_method", "")
service["version"] = host_port.get("service_version", "")
service["product"] = host_port.get("service_product", "")
service["extrainfo"] = host_port.get("service_extrainfo", "")
port["state"] = state
port["scripts"] = None # not supported
port["service"] = service
ports.append(port)
return ports
elif info == "extraports":
# extraports
all_extraports = list()
for extraport in host.extraports:
extraports = dict()
extraports["count"] = int(extraport.get("count", ""))
extraports["state"] = extraport.get("state", "")
extraports["reason"] = list() # not supported
extraports["all_reason"] = list() # not supported
all_extraports.append(extraports)
return all_extraports
elif info == "trace":
# getting traceroute information
if len(host.trace) > 0:
trace = {}
hops = []
for host_hop in host.trace.get("hops", []):
hop = {}
hop["ip"] = host_hop.get("ipaddr", "")
hop["ttl"] = int(host_hop.get("ttl", ""))
hop["rtt"] = host_hop.get("rtt", "")
hop["hostname"] = host_hop.get("host", "")
hops.append(hop)
trace["hops"] = hops
trace["port"] = host.trace.get("port", "")
trace["protocol"] = host.trace.get("proto", "")
return trace
else: # host is None
pass
return None
def get_coordinate_theta(self):
"""
"""
return self.__coordinate.get_theta()
def get_coordinate_radius(self):
"""
"""
return self.__coordinate.get_radius()
def set_coordinate_theta(self, value):
"""
"""
self.__coordinate.set_theta(value)
def set_coordinate_radius(self, value):
"""
"""
self.__coordinate.set_radius(value)
def set_polar_coordinate(self, r, t):
"""
Set polar coordinate
@type r: number
@param r: The radius of coordinate
@type t: number
@param t: The angle (theta) of coordinate in radians
"""
self.__coordinate.set_coordinate(r, t)
def get_polar_coordinate(self):
"""
Get polar coordinate
@rtype: tuple
@return: Polar coordinates (radius, theta)
"""
return self.__coordinate.get_coordinate()
def set_cartesian_coordinate(self, x, y):
"""
Set cartesian coordinate
"""
cartesian = CartesianCoordinate(x, y)
r, t = cartesian.to_polar()
self.set_polar_coordinate(r, math.degrees(t))
def get_cartesian_coordinate(self):
"""
Get cartesian coordinate
@rtype: tuple
@return: Cartesian coordinates (x, y)
"""
return self.__coordinate.to_cartesian()
def get_draw_info(self, info=None):
"""
Get draw information about node
@type info: string
@param info: Information name
@rtype: mixed
@return: The requested information
"""
if info == None:
return self.__draw_info
if self.__draw_info.has_key(info):
return self.__draw_info[info]
return None
def set_draw_info(self, info):
"""
Set draw information
@type info: dict
@param info: Draw information dictionary
"""
for key in info:
self.__draw_info[key] = info[key]
def deep_search_child(self, node):
"""
"""
for child in self.get_draw_info('children'):
if child == node:
return True
elif child.deep_search_child(node):
return True
return False
def set_subtree_info(self, info):
"""
"""
for child in self.get_draw_info('children'):
child.set_draw_info(info)
if child.get_draw_info('group') != True:
child.set_subtree_info(info)
def calc_needed_space(self):
"""
"""
number_of_children = len(self.get_draw_info('children'))
sum_angle = 0
own_angle = 0
if number_of_children > 0 and self.get_draw_info('group') != True:
for child in self.get_draw_info('children'):
child.calc_needed_space()
sum_angle += child.get_draw_info('space_need')
distance = self.get_coordinate_radius()
size = self.get_draw_info('radius') * 2
own_angle = geometry.angle_from_object(distance, size)
self.set_draw_info({'children_need':sum_angle})
self.set_draw_info({'space_need':max(sum_angle, own_angle)})
| gpl-2.0 |
bbc/kamaelia | Sketches/MPS/pprocess/ProcessPipelineNotComponent.py | 3 | 3481 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#...x....1....x....2....x....3....x....4....x....5....x.
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import Axon
from Axon.Scheduler import scheduler
import Axon.LikeFile
import pprocess
import time
import pprint
class ProcessWrapComponent(object):
def __init__(self, somecomponent):
self.wrapped = somecomponent.__class__.__name__
print "wrapped a", self.wrapped
self.exchange = pprocess.Exchange()
self.channel = None
self.inbound = []
self.C = somecomponent
self.ce = None
self.tick = time.time()
def _print(self, *args):
print self.wrapped," ".join([str(x) for x in args])
def tick_print(self, *args):
if time.time() - self.tick > 0.5:
self._print(*args)
self.tick = time.time()
def run(self, channel):
self.exchange.add(channel)
self.channel = channel
from Axon.LikeFile import likefile, background
background(zap=True).start()
time.sleep(0.1)
self.ce = likefile(self.C)
for i in self.main():
pass
def activate(self):
channel = pprocess.start(self.run)
return channel
def getDataFromReadyChannel(self):
chan = self.exchange.ready(0)[0]
D = chan._receive()
return D
def passOnDataToComponent(self, D):
self._print("pwc:- SEND", D, "TO", self.C.name)
self.ce.put(*D)
self._print("SENT")
def main(self):
while 1:
self.tick_print("")
if self.exchange.ready(0):
D = self.getDataFromReadyChannel()
self.passOnDataToComponent(D)
D = self.ce.anyReady()
if D:
for boxname in D:
D = self.ce.get(boxname)
self.channel._send((D, boxname))
yield 1
if self.channel.closed:
self._print(self.channel.closed)
def ProcessPipeline(*components):
exchange = pprocess.Exchange()
debug = False
chans = []
print "TESTING ME"
for comp in components:
A = ProcessWrapComponent( comp )
chan = A.activate()
chans.append( chan )
exchange.add(chan )
mappings = {}
for i in xrange(len(components)-1):
ci, cin = chans[i], chans[i+1]
mappings[ (ci, "outbox") ] = (cin, "inbox")
mappings[ (ci, "signal") ] = (cin, "control")
while 1:
for chan in exchange.ready(0):
D = chan._receive()
try:
dest = mappings[ ( chan, D[1] ) ]
dest[0]._send( (D[0], dest[1] ) )
print "FORWARDED", D
except KeyError:
if debug:
print "WARNING: unlinked box sent data"
print "This may be an error for your logic"
print "chan, D[1] D[0]",
print chan, D[1], repr(D[0])
pprint.pprint( mappings )
time.sleep(0.1)
| apache-2.0 |
souravsingh/sympy | sympy/simplify/ratsimp.py | 13 | 7527 | from __future__ import print_function, division
from itertools import combinations_with_replacement
from sympy.core import symbols, Add, Dummy
from sympy.core.numbers import Rational
from sympy.polys import cancel, ComputationFailed, parallel_poly_from_expr, reduced, Poly
from sympy.polys.monomials import Monomial, monomial_div
from sympy.polys.polyerrors import PolificationFailed, DomainError
from sympy.utilities.misc import debug
def ratsimp(expr):
"""
Put an expression over a common denominator, cancel and reduce.
Examples
========
>>> from sympy import ratsimp
>>> from sympy.abc import x, y
>>> ratsimp(1/x + 1/y)
(x + y)/(x*y)
"""
f, g = cancel(expr).as_numer_denom()
try:
Q, r = reduced(f, [g], field=True, expand=False)
except ComputationFailed:
return f/g
return Add(*Q) + cancel(r/g)
def ratsimpmodprime(expr, G, *gens, **args):
"""
Simplifies a rational expression ``expr`` modulo the prime ideal
generated by ``G``. ``G`` should be a Groebner basis of the
ideal.
>>> from sympy.simplify.ratsimp import ratsimpmodprime
>>> from sympy.abc import x, y
>>> eq = (x + y**5 + y)/(x - y)
>>> ratsimpmodprime(eq, [x*y**5 - x - y], x, y, order='lex')
(x**2 + x*y + x + y)/(x**2 - x*y)
If ``polynomial`` is False, the algorithm computes a rational
simplification which minimizes the sum of the total degrees of
the numerator and the denominator.
If ``polynomial`` is True, this function just brings numerator and
denominator into a canonical form. This is much faster, but has
potentially worse results.
References
==========
M. Monagan, R. Pearce, Rational Simplification Modulo a Polynomial
Ideal,
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.163.6984
(specifically, the second algorithm)
"""
from sympy import solve
quick = args.pop('quick', True)
polynomial = args.pop('polynomial', False)
debug('ratsimpmodprime', expr)
# usual preparation of polynomials:
num, denom = cancel(expr).as_numer_denom()
try:
polys, opt = parallel_poly_from_expr([num, denom] + G, *gens, **args)
except PolificationFailed:
return expr
domain = opt.domain
if domain.has_assoc_Field:
opt.domain = domain.get_field()
else:
raise DomainError(
"can't compute rational simplification over %s" % domain)
# compute only once
leading_monomials = [g.LM(opt.order) for g in polys[2:]]
tested = set()
def staircase(n):
"""
Compute all monomials with degree less than ``n`` that are
not divisible by any element of ``leading_monomials``.
"""
if n == 0:
return [1]
S = []
for mi in combinations_with_replacement(range(len(opt.gens)), n):
m = [0]*len(opt.gens)
for i in mi:
m[i] += 1
if all([monomial_div(m, lmg) is None for lmg in
leading_monomials]):
S.append(m)
return [Monomial(s).as_expr(*opt.gens) for s in S] + staircase(n - 1)
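# For example, with opt.gens == (x, y) and a single leading monomial x*y,
# staircase(2) would return [x**2, y**2, x, y, 1]: every monomial of total
# degree at most 2 except x*y, which is divisible by the leading monomial.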
def _ratsimpmodprime(a, b, allsol, N=0, D=0):
"""
Computes a rational simplification of ``a/b`` which minimizes
the sum of the total degrees of the numerator and the denominator.
The algorithm proceeds by looking at ``a * d - b * c`` modulo
the ideal generated by ``G`` for some ``c`` and ``d`` with degree
less than ``a`` and ``b`` respectively.
The coefficients of ``c`` and ``d`` are indeterminates and thus
the coefficients of the normalform of ``a * d - b * c`` are
linear polynomials in these indeterminates.
If these linear polynomials, considered as system of
equations, have a nontrivial solution, then `\frac{a}{b}
\equiv \frac{c}{d}` modulo the ideal generated by ``G``. So,
by construction, the degree of ``c`` and ``d`` is less than
the degree of ``a`` and ``b``, so a simpler representation
has been found.
After a simpler representation has been found, the algorithm
tries to reduce the degree of the numerator and denominator
and returns the result afterwards.
As an extension, if quick=False, we look at all possible degrees such
that the total degree is less than *or equal to* the best current
solution. We retain a list of all solutions of minimal degree, and try
to find the best one at the end.
"""
c, d = a, b
steps = 0
maxdeg = a.total_degree() + b.total_degree()
if quick:
bound = maxdeg - 1
else:
bound = maxdeg
while N + D <= bound:
if (N, D) in tested:
break
tested.add((N, D))
M1 = staircase(N)
M2 = staircase(D)
debug('%s / %s: %s, %s' % (N, D, M1, M2))
Cs = symbols("c:%d" % len(M1), cls=Dummy)
Ds = symbols("d:%d" % len(M2), cls=Dummy)
ng = Cs + Ds
c_hat = Poly(
sum([Cs[i] * M1[i] for i in range(len(M1))]), opt.gens + ng)
d_hat = Poly(
sum([Ds[i] * M2[i] for i in range(len(M2))]), opt.gens + ng)
r = reduced(a * d_hat - b * c_hat, G, opt.gens + ng,
order=opt.order, polys=True)[1]
S = Poly(r, gens=opt.gens).coeffs()
sol = solve(S, Cs + Ds, particular=True, quick=True)
if sol and not all([s == 0 for s in sol.values()]):
c = c_hat.subs(sol)
d = d_hat.subs(sol)
# The "free" variables occuring before as parameters
# might still be in the substituted c, d, so set them
# to the value chosen before:
c = c.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
d = d.subs(dict(list(zip(Cs + Ds, [1] * (len(Cs) + len(Ds))))))
c = Poly(c, opt.gens)
d = Poly(d, opt.gens)
if d == 0:
raise ValueError('Ideal not prime?')
allsol.append((c_hat, d_hat, S, Cs + Ds))
if N + D != maxdeg:
allsol = [allsol[-1]]
break
steps += 1
N += 1
D += 1
if steps > 0:
c, d, allsol = _ratsimpmodprime(c, d, allsol, N, D - steps)
c, d, allsol = _ratsimpmodprime(c, d, allsol, N - steps, D)
return c, d, allsol
# preprocessing. this improves performance a bit when deg(num)
# and deg(denom) are large:
num = reduced(num, G, opt.gens, order=opt.order)[1]
denom = reduced(denom, G, opt.gens, order=opt.order)[1]
if polynomial:
return (num/denom).cancel()
c, d, allsol = _ratsimpmodprime(
Poly(num, opt.gens), Poly(denom, opt.gens), [])
if not quick and allsol:
debug('Looking for best minimal solution. Got: %s' % len(allsol))
newsol = []
for c_hat, d_hat, S, ng in allsol:
sol = solve(S, ng, particular=True, quick=False)
newsol.append((c_hat.subs(sol), d_hat.subs(sol)))
c, d = min(newsol, key=lambda x: len(x[0].terms()) + len(x[1].terms()))
if not domain.has_Field:
cn, c = c.clear_denoms(convert=True)
dn, d = d.clear_denoms(convert=True)
r = Rational(cn, dn)
return (c*r.q)/(d*r.p)
| bsd-3-clause |
zephyrplugins/zephyr | zephyr.plugin.jython/jython2.5.2rc3/Lib/sre_constants.py | 349 | 7137 | #
# Secret Labs' Regular Expression Engine
#
# various symbols used by the regular expression engine.
# run this script to update the _sre include files!
#
# Copyright (c) 1998-2001 by Secret Labs AB. All rights reserved.
#
# See the sre.py file for information on usage and redistribution.
#
"""Internal support module for sre"""
# update when constants are added or removed
MAGIC = 20031017
# max code word in this release
MAXREPEAT = 65535
# SRE standard exception (access as sre.error)
# should this really be here?
class error(Exception):
pass
# operators
FAILURE = "failure"
SUCCESS = "success"
ANY = "any"
ANY_ALL = "any_all"
ASSERT = "assert"
ASSERT_NOT = "assert_not"
AT = "at"
BIGCHARSET = "bigcharset"
BRANCH = "branch"
CALL = "call"
CATEGORY = "category"
CHARSET = "charset"
GROUPREF = "groupref"
GROUPREF_IGNORE = "groupref_ignore"
GROUPREF_EXISTS = "groupref_exists"
IN = "in"
IN_IGNORE = "in_ignore"
INFO = "info"
JUMP = "jump"
LITERAL = "literal"
LITERAL_IGNORE = "literal_ignore"
MARK = "mark"
MAX_REPEAT = "max_repeat"
MAX_UNTIL = "max_until"
MIN_REPEAT = "min_repeat"
MIN_UNTIL = "min_until"
NEGATE = "negate"
NOT_LITERAL = "not_literal"
NOT_LITERAL_IGNORE = "not_literal_ignore"
RANGE = "range"
REPEAT = "repeat"
REPEAT_ONE = "repeat_one"
SUBPATTERN = "subpattern"
MIN_REPEAT_ONE = "min_repeat_one"
# positions
AT_BEGINNING = "at_beginning"
AT_BEGINNING_LINE = "at_beginning_line"
AT_BEGINNING_STRING = "at_beginning_string"
AT_BOUNDARY = "at_boundary"
AT_NON_BOUNDARY = "at_non_boundary"
AT_END = "at_end"
AT_END_LINE = "at_end_line"
AT_END_STRING = "at_end_string"
AT_LOC_BOUNDARY = "at_loc_boundary"
AT_LOC_NON_BOUNDARY = "at_loc_non_boundary"
AT_UNI_BOUNDARY = "at_uni_boundary"
AT_UNI_NON_BOUNDARY = "at_uni_non_boundary"
# categories
CATEGORY_DIGIT = "category_digit"
CATEGORY_NOT_DIGIT = "category_not_digit"
CATEGORY_SPACE = "category_space"
CATEGORY_NOT_SPACE = "category_not_space"
CATEGORY_WORD = "category_word"
CATEGORY_NOT_WORD = "category_not_word"
CATEGORY_LINEBREAK = "category_linebreak"
CATEGORY_NOT_LINEBREAK = "category_not_linebreak"
CATEGORY_LOC_WORD = "category_loc_word"
CATEGORY_LOC_NOT_WORD = "category_loc_not_word"
CATEGORY_UNI_DIGIT = "category_uni_digit"
CATEGORY_UNI_NOT_DIGIT = "category_uni_not_digit"
CATEGORY_UNI_SPACE = "category_uni_space"
CATEGORY_UNI_NOT_SPACE = "category_uni_not_space"
CATEGORY_UNI_WORD = "category_uni_word"
CATEGORY_UNI_NOT_WORD = "category_uni_not_word"
CATEGORY_UNI_LINEBREAK = "category_uni_linebreak"
CATEGORY_UNI_NOT_LINEBREAK = "category_uni_not_linebreak"
OPCODES = [
# failure=0 success=1 (just because it looks better that way :-)
FAILURE, SUCCESS,
ANY, ANY_ALL,
ASSERT, ASSERT_NOT,
AT,
BRANCH,
CALL,
CATEGORY,
CHARSET, BIGCHARSET,
GROUPREF, GROUPREF_EXISTS, GROUPREF_IGNORE,
IN, IN_IGNORE,
INFO,
JUMP,
LITERAL, LITERAL_IGNORE,
MARK,
MAX_UNTIL,
MIN_UNTIL,
NOT_LITERAL, NOT_LITERAL_IGNORE,
NEGATE,
RANGE,
REPEAT,
REPEAT_ONE,
SUBPATTERN,
MIN_REPEAT_ONE
]
ATCODES = [
AT_BEGINNING, AT_BEGINNING_LINE, AT_BEGINNING_STRING, AT_BOUNDARY,
AT_NON_BOUNDARY, AT_END, AT_END_LINE, AT_END_STRING,
AT_LOC_BOUNDARY, AT_LOC_NON_BOUNDARY, AT_UNI_BOUNDARY,
AT_UNI_NON_BOUNDARY
]
CHCODES = [
CATEGORY_DIGIT, CATEGORY_NOT_DIGIT, CATEGORY_SPACE,
CATEGORY_NOT_SPACE, CATEGORY_WORD, CATEGORY_NOT_WORD,
CATEGORY_LINEBREAK, CATEGORY_NOT_LINEBREAK, CATEGORY_LOC_WORD,
CATEGORY_LOC_NOT_WORD, CATEGORY_UNI_DIGIT, CATEGORY_UNI_NOT_DIGIT,
CATEGORY_UNI_SPACE, CATEGORY_UNI_NOT_SPACE, CATEGORY_UNI_WORD,
CATEGORY_UNI_NOT_WORD, CATEGORY_UNI_LINEBREAK,
CATEGORY_UNI_NOT_LINEBREAK
]
def makedict(list):
d = {}
i = 0
for item in list:
d[item] = i
i = i + 1
return d
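# For example, makedict([FAILURE, SUCCESS, ANY, ...]) maps each name to its
# position in the list: {"failure": 0, "success": 1, "any": 2, ...}.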
OPCODES = makedict(OPCODES)
ATCODES = makedict(ATCODES)
CHCODES = makedict(CHCODES)
# replacement operations for "ignore case" mode
OP_IGNORE = {
GROUPREF: GROUPREF_IGNORE,
IN: IN_IGNORE,
LITERAL: LITERAL_IGNORE,
NOT_LITERAL: NOT_LITERAL_IGNORE
}
AT_MULTILINE = {
AT_BEGINNING: AT_BEGINNING_LINE,
AT_END: AT_END_LINE
}
AT_LOCALE = {
AT_BOUNDARY: AT_LOC_BOUNDARY,
AT_NON_BOUNDARY: AT_LOC_NON_BOUNDARY
}
AT_UNICODE = {
AT_BOUNDARY: AT_UNI_BOUNDARY,
AT_NON_BOUNDARY: AT_UNI_NON_BOUNDARY
}
CH_LOCALE = {
CATEGORY_DIGIT: CATEGORY_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_NOT_SPACE,
CATEGORY_WORD: CATEGORY_LOC_WORD,
CATEGORY_NOT_WORD: CATEGORY_LOC_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_NOT_LINEBREAK
}
CH_UNICODE = {
CATEGORY_DIGIT: CATEGORY_UNI_DIGIT,
CATEGORY_NOT_DIGIT: CATEGORY_UNI_NOT_DIGIT,
CATEGORY_SPACE: CATEGORY_UNI_SPACE,
CATEGORY_NOT_SPACE: CATEGORY_UNI_NOT_SPACE,
CATEGORY_WORD: CATEGORY_UNI_WORD,
CATEGORY_NOT_WORD: CATEGORY_UNI_NOT_WORD,
CATEGORY_LINEBREAK: CATEGORY_UNI_LINEBREAK,
CATEGORY_NOT_LINEBREAK: CATEGORY_UNI_NOT_LINEBREAK
}
# flags
SRE_FLAG_TEMPLATE = 1 # template mode (disable backtracking)
SRE_FLAG_IGNORECASE = 2 # case insensitive
SRE_FLAG_LOCALE = 4 # honour system locale
SRE_FLAG_MULTILINE = 8 # treat target as multiline string
SRE_FLAG_DOTALL = 16 # treat target as a single string
SRE_FLAG_UNICODE = 32 # use unicode locale
SRE_FLAG_VERBOSE = 64 # ignore whitespace and comments
SRE_FLAG_DEBUG = 128 # debugging
# flags for INFO primitive
SRE_INFO_PREFIX = 1 # has prefix
SRE_INFO_LITERAL = 2 # entire pattern is literal (given by prefix)
SRE_INFO_CHARSET = 4 # pattern starts with character from given set
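# Illustrative note (not in the original source): the SRE_FLAG_* values are
# powers of two, so flags combine with bitwise OR, e.g.
# SRE_FLAG_IGNORECASE | SRE_FLAG_MULTILINE == 10.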
if __name__ == "__main__":
def dump(f, d, prefix):
items = d.items()
items.sort(key=lambda a: a[1])
for k, v in items:
f.write("#define %s_%s %s\n" % (prefix, k.upper(), v))
f = open("sre_constants.h", "w")
f.write("""\
/*
* Secret Labs' Regular Expression Engine
*
* regular expression matching engine
*
* NOTE: This file is generated by sre_constants.py. If you need
* to change anything in here, edit sre_constants.py and run it.
*
* Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
*
* See the _sre.c file for information on usage and redistribution.
*/
""")
f.write("#define SRE_MAGIC %d\n" % MAGIC)
dump(f, OPCODES, "SRE_OP")
dump(f, ATCODES, "SRE")
dump(f, CHCODES, "SRE")
f.write("#define SRE_FLAG_TEMPLATE %d\n" % SRE_FLAG_TEMPLATE)
f.write("#define SRE_FLAG_IGNORECASE %d\n" % SRE_FLAG_IGNORECASE)
f.write("#define SRE_FLAG_LOCALE %d\n" % SRE_FLAG_LOCALE)
f.write("#define SRE_FLAG_MULTILINE %d\n" % SRE_FLAG_MULTILINE)
f.write("#define SRE_FLAG_DOTALL %d\n" % SRE_FLAG_DOTALL)
f.write("#define SRE_FLAG_UNICODE %d\n" % SRE_FLAG_UNICODE)
f.write("#define SRE_FLAG_VERBOSE %d\n" % SRE_FLAG_VERBOSE)
f.write("#define SRE_INFO_PREFIX %d\n" % SRE_INFO_PREFIX)
f.write("#define SRE_INFO_LITERAL %d\n" % SRE_INFO_LITERAL)
f.write("#define SRE_INFO_CHARSET %d\n" % SRE_INFO_CHARSET)
f.close()
print "done"
| epl-1.0 |
hbrunn/OCB | addons/l10n_fr/l10n_fr.py | 336 | 2089 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class l10n_fr_report(osv.osv):
_name = 'l10n.fr.report'
_description = 'Report for l10n_fr'
_columns = {
'code': fields.char('Code', size=64),
'name': fields.char('Name'),
'line_ids': fields.one2many('l10n.fr.line', 'report_id', 'Lines', copy=True),
}
_sql_constraints = [
('code_uniq', 'unique (code)','The code report must be unique !')
]
class l10n_fr_line(osv.osv):
_name = 'l10n.fr.line'
_description = 'Report Lines for l10n_fr'
_columns = {
'code': fields.char('Variable Name', size=64),
'definition': fields.char('Definition'),
'name': fields.char('Name'),
'report_id': fields.many2one('l10n.fr.report', 'Report'),
}
_sql_constraints = [
('code_uniq', 'unique (code)', 'The variable name must be unique !')
]
class res_company(osv.osv):
_inherit = 'res.company'
_columns = {
'siret': fields.char('SIRET', size=14),
'ape': fields.char('APE'),
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
keedio/hue | desktop/core/ext-py/pycrypto-2.6.1/lib/Crypto/SelfTest/Cipher/test_DES.py | 119 | 15009 | # -*- coding: utf-8 -*-
#
# SelfTest/Cipher/DES.py: Self-test for the (Single) DES cipher
#
# Written in 2008 by Dwayne C. Litzenberger <[email protected]>
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""Self-test suite for Crypto.Cipher.DES"""
__revision__ = "$Id$"
from common import dict # For compatibility with Python 2.1 and 2.2
from Crypto.Util.py3compat import *
import unittest
# This is a list of (plaintext, ciphertext, key, description) tuples.
SP800_17_B1_KEY = '01' * 8
SP800_17_B2_PT = '00' * 8
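# Illustrative note (not in the original source): the two helpers above expand
# to 16 hex characters each, i.e. SP800_17_B1_KEY == '0101010101010101' (the
# 8-byte key used throughout Table B.1) and SP800_17_B2_PT == '0000000000000000'
# (the all-zero plaintext used throughout Table B.2).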
test_data = [
# Test vectors from Appendix A of NIST SP 800-17
# "Modes of Operation Validation System (MOVS): Requirements and Procedures"
# http://csrc.nist.gov/publications/nistpubs/800-17/800-17.pdf
# Appendix A - "Sample Round Outputs for the DES"
('0000000000000000', '82dcbafbdeab6602', '10316e028c8f3b4a',
"NIST SP800-17 A"),
# Table B.1 - Variable Plaintext Known Answer Test
('8000000000000000', '95f8a5e5dd31d900', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #0'),
('4000000000000000', 'dd7f121ca5015619', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #1'),
('2000000000000000', '2e8653104f3834ea', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #2'),
('1000000000000000', '4bd388ff6cd81d4f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #3'),
('0800000000000000', '20b9e767b2fb1456', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #4'),
('0400000000000000', '55579380d77138ef', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #5'),
('0200000000000000', '6cc5defaaf04512f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #6'),
('0100000000000000', '0d9f279ba5d87260', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #7'),
('0080000000000000', 'd9031b0271bd5a0a', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #8'),
('0040000000000000', '424250b37c3dd951', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #9'),
('0020000000000000', 'b8061b7ecd9a21e5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #10'),
('0010000000000000', 'f15d0f286b65bd28', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #11'),
('0008000000000000', 'add0cc8d6e5deba1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #12'),
('0004000000000000', 'e6d5f82752ad63d1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #13'),
('0002000000000000', 'ecbfe3bd3f591a5e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #14'),
('0001000000000000', 'f356834379d165cd', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #15'),
('0000800000000000', '2b9f982f20037fa9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #16'),
('0000400000000000', '889de068a16f0be6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #17'),
('0000200000000000', 'e19e275d846a1298', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #18'),
('0000100000000000', '329a8ed523d71aec', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #19'),
('0000080000000000', 'e7fce22557d23c97', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #20'),
('0000040000000000', '12a9f5817ff2d65d', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #21'),
('0000020000000000', 'a484c3ad38dc9c19', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #22'),
('0000010000000000', 'fbe00a8a1ef8ad72', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #23'),
('0000008000000000', '750d079407521363', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #24'),
('0000004000000000', '64feed9c724c2faf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #25'),
('0000002000000000', 'f02b263b328e2b60', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #26'),
('0000001000000000', '9d64555a9a10b852', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #27'),
('0000000800000000', 'd106ff0bed5255d7', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #28'),
('0000000400000000', 'e1652c6b138c64a5', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #29'),
('0000000200000000', 'e428581186ec8f46', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #30'),
('0000000100000000', 'aeb5f5ede22d1a36', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #31'),
('0000000080000000', 'e943d7568aec0c5c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #32'),
('0000000040000000', 'df98c8276f54b04b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #33'),
('0000000020000000', 'b160e4680f6c696f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #34'),
('0000000010000000', 'fa0752b07d9c4ab8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #35'),
('0000000008000000', 'ca3a2b036dbc8502', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #36'),
('0000000004000000', '5e0905517bb59bcf', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #37'),
('0000000002000000', '814eeb3b91d90726', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #38'),
('0000000001000000', '4d49db1532919c9f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #39'),
('0000000000800000', '25eb5fc3f8cf0621', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #40'),
('0000000000400000', 'ab6a20c0620d1c6f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #41'),
('0000000000200000', '79e90dbc98f92cca', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #42'),
('0000000000100000', '866ecedd8072bb0e', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #43'),
('0000000000080000', '8b54536f2f3e64a8', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #44'),
('0000000000040000', 'ea51d3975595b86b', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #45'),
('0000000000020000', 'caffc6ac4542de31', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #46'),
('0000000000010000', '8dd45a2ddf90796c', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #47'),
('0000000000008000', '1029d55e880ec2d0', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #48'),
('0000000000004000', '5d86cb23639dbea9', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #49'),
('0000000000002000', '1d1ca853ae7c0c5f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #50'),
('0000000000001000', 'ce332329248f3228', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #51'),
('0000000000000800', '8405d1abe24fb942', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #52'),
('0000000000000400', 'e643d78090ca4207', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #53'),
('0000000000000200', '48221b9937748a23', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #54'),
('0000000000000100', 'dd7c0bbd61fafd54', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #55'),
('0000000000000080', '2fbc291a570db5c4', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #56'),
('0000000000000040', 'e07c30d7e4e26e12', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #57'),
('0000000000000020', '0953e2258e8e90a1', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #58'),
('0000000000000010', '5b711bc4ceebf2ee', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #59'),
('0000000000000008', 'cc083f1e6d9e85f6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #60'),
('0000000000000004', 'd2fd8867d50d2dfe', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #61'),
('0000000000000002', '06e7ea22ce92708f', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #62'),
('0000000000000001', '166b40b44aba4bd6', SP800_17_B1_KEY,
'NIST SP800-17 B.1 #63'),
# Table B.2 - Variable Key Known Answer Test
(SP800_17_B2_PT, '95a8d72813daa94d', '8001010101010101',
'NIST SP800-17 B.2 #0'),
(SP800_17_B2_PT, '0eec1487dd8c26d5', '4001010101010101',
'NIST SP800-17 B.2 #1'),
(SP800_17_B2_PT, '7ad16ffb79c45926', '2001010101010101',
'NIST SP800-17 B.2 #2'),
(SP800_17_B2_PT, 'd3746294ca6a6cf3', '1001010101010101',
'NIST SP800-17 B.2 #3'),
(SP800_17_B2_PT, '809f5f873c1fd761', '0801010101010101',
'NIST SP800-17 B.2 #4'),
(SP800_17_B2_PT, 'c02faffec989d1fc', '0401010101010101',
'NIST SP800-17 B.2 #5'),
(SP800_17_B2_PT, '4615aa1d33e72f10', '0201010101010101',
'NIST SP800-17 B.2 #6'),
(SP800_17_B2_PT, '2055123350c00858', '0180010101010101',
'NIST SP800-17 B.2 #7'),
(SP800_17_B2_PT, 'df3b99d6577397c8', '0140010101010101',
'NIST SP800-17 B.2 #8'),
(SP800_17_B2_PT, '31fe17369b5288c9', '0120010101010101',
'NIST SP800-17 B.2 #9'),
(SP800_17_B2_PT, 'dfdd3cc64dae1642', '0110010101010101',
'NIST SP800-17 B.2 #10'),
(SP800_17_B2_PT, '178c83ce2b399d94', '0108010101010101',
'NIST SP800-17 B.2 #11'),
(SP800_17_B2_PT, '50f636324a9b7f80', '0104010101010101',
'NIST SP800-17 B.2 #12'),
(SP800_17_B2_PT, 'a8468ee3bc18f06d', '0102010101010101',
'NIST SP800-17 B.2 #13'),
(SP800_17_B2_PT, 'a2dc9e92fd3cde92', '0101800101010101',
'NIST SP800-17 B.2 #14'),
(SP800_17_B2_PT, 'cac09f797d031287', '0101400101010101',
'NIST SP800-17 B.2 #15'),
(SP800_17_B2_PT, '90ba680b22aeb525', '0101200101010101',
'NIST SP800-17 B.2 #16'),
(SP800_17_B2_PT, 'ce7a24f350e280b6', '0101100101010101',
'NIST SP800-17 B.2 #17'),
(SP800_17_B2_PT, '882bff0aa01a0b87', '0101080101010101',
'NIST SP800-17 B.2 #18'),
(SP800_17_B2_PT, '25610288924511c2', '0101040101010101',
'NIST SP800-17 B.2 #19'),
(SP800_17_B2_PT, 'c71516c29c75d170', '0101020101010101',
'NIST SP800-17 B.2 #20'),
(SP800_17_B2_PT, '5199c29a52c9f059', '0101018001010101',
'NIST SP800-17 B.2 #21'),
(SP800_17_B2_PT, 'c22f0a294a71f29f', '0101014001010101',
'NIST SP800-17 B.2 #22'),
(SP800_17_B2_PT, 'ee371483714c02ea', '0101012001010101',
'NIST SP800-17 B.2 #23'),
(SP800_17_B2_PT, 'a81fbd448f9e522f', '0101011001010101',
'NIST SP800-17 B.2 #24'),
(SP800_17_B2_PT, '4f644c92e192dfed', '0101010801010101',
'NIST SP800-17 B.2 #25'),
(SP800_17_B2_PT, '1afa9a66a6df92ae', '0101010401010101',
'NIST SP800-17 B.2 #26'),
(SP800_17_B2_PT, 'b3c1cc715cb879d8', '0101010201010101',
'NIST SP800-17 B.2 #27'),
(SP800_17_B2_PT, '19d032e64ab0bd8b', '0101010180010101',
'NIST SP800-17 B.2 #28'),
(SP800_17_B2_PT, '3cfaa7a7dc8720dc', '0101010140010101',
'NIST SP800-17 B.2 #29'),
(SP800_17_B2_PT, 'b7265f7f447ac6f3', '0101010120010101',
'NIST SP800-17 B.2 #30'),
(SP800_17_B2_PT, '9db73b3c0d163f54', '0101010110010101',
'NIST SP800-17 B.2 #31'),
(SP800_17_B2_PT, '8181b65babf4a975', '0101010108010101',
'NIST SP800-17 B.2 #32'),
(SP800_17_B2_PT, '93c9b64042eaa240', '0101010104010101',
'NIST SP800-17 B.2 #33'),
(SP800_17_B2_PT, '5570530829705592', '0101010102010101',
'NIST SP800-17 B.2 #34'),
(SP800_17_B2_PT, '8638809e878787a0', '0101010101800101',
'NIST SP800-17 B.2 #35'),
(SP800_17_B2_PT, '41b9a79af79ac208', '0101010101400101',
'NIST SP800-17 B.2 #36'),
(SP800_17_B2_PT, '7a9be42f2009a892', '0101010101200101',
'NIST SP800-17 B.2 #37'),
(SP800_17_B2_PT, '29038d56ba6d2745', '0101010101100101',
'NIST SP800-17 B.2 #38'),
(SP800_17_B2_PT, '5495c6abf1e5df51', '0101010101080101',
'NIST SP800-17 B.2 #39'),
(SP800_17_B2_PT, 'ae13dbd561488933', '0101010101040101',
'NIST SP800-17 B.2 #40'),
(SP800_17_B2_PT, '024d1ffa8904e389', '0101010101020101',
'NIST SP800-17 B.2 #41'),
(SP800_17_B2_PT, 'd1399712f99bf02e', '0101010101018001',
'NIST SP800-17 B.2 #42'),
(SP800_17_B2_PT, '14c1d7c1cffec79e', '0101010101014001',
'NIST SP800-17 B.2 #43'),
(SP800_17_B2_PT, '1de5279dae3bed6f', '0101010101012001',
'NIST SP800-17 B.2 #44'),
(SP800_17_B2_PT, 'e941a33f85501303', '0101010101011001',
'NIST SP800-17 B.2 #45'),
(SP800_17_B2_PT, 'da99dbbc9a03f379', '0101010101010801',
'NIST SP800-17 B.2 #46'),
(SP800_17_B2_PT, 'b7fc92f91d8e92e9', '0101010101010401',
'NIST SP800-17 B.2 #47'),
(SP800_17_B2_PT, 'ae8e5caa3ca04e85', '0101010101010201',
'NIST SP800-17 B.2 #48'),
(SP800_17_B2_PT, '9cc62df43b6eed74', '0101010101010180',
'NIST SP800-17 B.2 #49'),
(SP800_17_B2_PT, 'd863dbb5c59a91a0', '0101010101010140',
'NIST SP800-17 B.2 #50'),
(SP800_17_B2_PT, 'a1ab2190545b91d7', '0101010101010120',
'NIST SP800-17 B.2 #51'),
(SP800_17_B2_PT, '0875041e64c570f7', '0101010101010110',
'NIST SP800-17 B.2 #52'),
(SP800_17_B2_PT, '5a594528bebef1cc', '0101010101010108',
'NIST SP800-17 B.2 #53'),
(SP800_17_B2_PT, 'fcdb3291de21f0c0', '0101010101010104',
'NIST SP800-17 B.2 #54'),
(SP800_17_B2_PT, '869efd7f9f265a09', '0101010101010102',
'NIST SP800-17 B.2 #55'),
]
class RonRivestTest(unittest.TestCase):
""" Ronald L. Rivest's DES test, see
http://people.csail.mit.edu/rivest/Destest.txt
ABSTRACT
--------
We present a simple way to test the correctness of a DES implementation:
Use the recurrence relation:
X0 = 9474B8E8C73BCA7D (hexadecimal)
X(i+1) = IF (i is even) THEN E(Xi,Xi) ELSE D(Xi,Xi)
to compute a sequence of 64-bit values: X0, X1, X2, ..., X16. Here
E(X,K) denotes the DES encryption of X using key K, and D(X,K) denotes
the DES decryption of X using key K. If you obtain
X16 = 1B1A2DDB4C642438
your implementation does not have any of the 36,568 possible single-fault
errors described herein.
"""
def runTest(self):
from Crypto.Cipher import DES
from binascii import b2a_hex
X = []
X[0:] = [b('\x94\x74\xB8\xE8\xC7\x3B\xCA\x7D')]
for i in range(16):
c = DES.new(X[i],DES.MODE_ECB)
if not (i&1): # (num&1) returns 1 for odd numbers
X[i+1:] = [c.encrypt(X[i])] # even
else:
X[i+1:] = [c.decrypt(X[i])] # odd
self.assertEqual(b2a_hex(X[16]),
b2a_hex(b('\x1B\x1A\x2D\xDB\x4C\x64\x24\x38')))
def get_tests(config={}):
from Crypto.Cipher import DES
from common import make_block_tests
return make_block_tests(DES, "DES", test_data) + [RonRivestTest()]
if __name__ == '__main__':
import unittest
suite = lambda: unittest.TestSuite(get_tests())
unittest.main(defaultTest='suite')
# vim:set ts=4 sw=4 sts=4 expandtab:
| apache-2.0 |
yaii/yai | share/extensions/motion.py | 6 | 4713 | #!/usr/bin/env python
'''
Copyright (C) 2005 Aaron Spike, [email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
'''
import math, inkex, simplestyle, simplepath, bezmisc
class Motion(inkex.Effect):
def __init__(self):
inkex.Effect.__init__(self)
self.OptionParser.add_option("-a", "--angle",
action="store", type="float",
dest="angle", default=45.0,
help="direction of the motion vector")
self.OptionParser.add_option("-m", "--magnitude",
action="store", type="float",
dest="magnitude", default=100.0,
help="magnitude of the motion vector")
def makeface(self,last,(cmd, params)):
a = []
a.append(['M',last[:]])
a.append([cmd, params[:]])
#translate path segment along vector
np = params[:]
defs = simplepath.pathdefs[cmd]
for i in range(defs[1]):
if defs[3][i] == 'x':
np[i] += self.vx
elif defs[3][i] == 'y':
np[i] += self.vy
a.append(['L',[np[-2],np[-1]]])
#reverse direction of path segment
np[-2:] = last[0]+self.vx,last[1]+self.vy
if cmd == 'C':
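# chained assignment: swap the two cubic control points (np[:2] <-> np[2:4])
# so the reversed segment keeps its curvature; the intermediate c1 tuple is
# not used afterwards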
c1 = np[:2], np[2:4] = np[2:4], np[:2]
a.append([cmd,np[:]])
a.append(['Z',[]])
face = inkex.etree.SubElement(self.facegroup,inkex.addNS('path','svg'),{'d':simplepath.formatPath(a)})
def effect(self):
self.vx = math.cos(math.radians(self.options.angle))*self.options.magnitude
self.vy = math.sin(math.radians(self.options.angle))*self.options.magnitude
for id, node in self.selected.iteritems():
if node.tag == inkex.addNS('path','svg'):
group = inkex.etree.SubElement(node.getparent(),inkex.addNS('g','svg'))
self.facegroup = inkex.etree.SubElement(group, inkex.addNS('g','svg'))
group.append(node)
t = node.get('transform')
if t:
group.set('transform', t)
node.set('transform','')
s = node.get('style')
self.facegroup.set('style', s)
p = simplepath.parsePath(node.get('d'))
for cmd,params in p:
tees = []
if cmd == 'C':
bez = (last,params[:2],params[2:4],params[-2:])
tees = [t for t in bezmisc.beziertatslope(bez,(self.vy,self.vx)) if 0<t<1]
tees.sort()
segments = []
if len(tees) == 0 and cmd in ['L','C']:
segments.append([cmd,params[:]])
elif len(tees) == 1:
one,two = bezmisc.beziersplitatt(bez,tees[0])
segments.append([cmd,list(one[1]+one[2]+one[3])])
segments.append([cmd,list(two[1]+two[2]+two[3])])
elif len(tees) == 2:
one,two = bezmisc.beziersplitatt(bez,tees[0])
two,three = bezmisc.beziersplitatt(two,tees[1])
segments.append([cmd,list(one[1]+one[2]+one[3])])
segments.append([cmd,list(two[1]+two[2]+two[3])])
segments.append([cmd,list(three[1]+three[2]+three[3])])
for seg in segments:
self.makeface(last,seg)
last = seg[1][-2:]
if cmd == 'M':
subPathStart = params[-2:]
if cmd == 'Z':
last = subPathStart
else:
last = params[-2:]
if __name__ == '__main__':
e = Motion()
e.affect()
# vim: expandtab shiftwidth=4 tabstop=8 softtabstop=4 fileencoding=utf-8 textwidth=99
| gpl-2.0 |
b0ttl3z/SickRage | lib/requests/sessions.py | 17 | 27144 | # -*- coding: utf-8 -*-
"""
requests.session
~~~~~~~~~~~~~~~~
This module provides a Session object to manage and persist settings across
requests (cookies, auth, proxies).
"""
import os
import platform
import time
from collections import Mapping
from datetime import timedelta
from .auth import _basic_auth_str
from .compat import cookielib, is_py3, OrderedDict, urljoin, urlparse
from .cookies import (
cookiejar_from_dict, extract_cookies_to_jar, RequestsCookieJar, merge_cookies)
from .models import Request, PreparedRequest, DEFAULT_REDIRECT_LIMIT
from .hooks import default_hooks, dispatch_hook
from ._internal_utils import to_native_string
from .utils import to_key_val_list, default_headers
from .exceptions import (
TooManyRedirects, InvalidSchema, ChunkedEncodingError, ContentDecodingError)
from .structures import CaseInsensitiveDict
from .adapters import HTTPAdapter
from .utils import (
requote_uri, get_environ_proxies, get_netrc_auth, should_bypass_proxies,
get_auth_from_url, rewind_body
)
from .status_codes import codes
# formerly defined here, reexposed here for backward compatibility
from .models import REDIRECT_STATI
# Preferred clock, based on which one is more accurate on a given system.
if platform.system() == 'Windows':
try: # Python 3.3+
preferred_clock = time.perf_counter
except AttributeError: # Earlier than Python 3.
preferred_clock = time.clock
else:
preferred_clock = time.time
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
"""Determines appropriate setting for a given request, taking into account
the explicit setting on that request, and the setting in the session. If a
setting is a dictionary, they will be merged together using `dict_class`
"""
if session_setting is None:
return request_setting
if request_setting is None:
return session_setting
# Bypass if not a dictionary (e.g. verify)
if not (
isinstance(session_setting, Mapping) and
isinstance(request_setting, Mapping)
):
return request_setting
merged_setting = dict_class(to_key_val_list(session_setting))
merged_setting.update(to_key_val_list(request_setting))
# Remove keys that are set to None. Extract keys first to avoid altering
# the dictionary during iteration.
none_keys = [k for (k, v) in merged_setting.items() if v is None]
for key in none_keys:
del merged_setting[key]
return merged_setting
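# Illustrative example (assumed values, not part of the original source):
# merge_setting({'X-Extra': '1', 'Accept': None}, {'Accept': '*/*', 'User-Agent': 'ua'})
# starts from the session dict, overlays the request dict, then drops the
# None-valued 'Accept' key, returning {'User-Agent': 'ua', 'X-Extra': '1'}.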
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
"""Properly merges both requests and session hooks.
This is necessary because when request_hooks == {'response': []}, the
merge breaks Session hooks entirely.
"""
if session_hooks is None or session_hooks.get('response') == []:
return request_hooks
if request_hooks is None or request_hooks.get('response') == []:
return session_hooks
return merge_setting(request_hooks, session_hooks, dict_class)
class SessionRedirectMixin(object):
def get_redirect_target(self, resp):
"""Receives a Response. Returns a redirect URI or ``None``"""
if resp.is_redirect:
location = resp.headers['location']
# Currently the underlying http module on py3 decode headers
# in latin1, but empirical evidence suggests that latin1 is very
# rarely used with non-ASCII characters in HTTP headers.
# It is more likely to get UTF8 header rather than latin1.
# This causes incorrect handling of UTF8 encoded location headers.
# To solve this, we re-encode the location in latin1.
if is_py3:
location = location.encode('latin1')
return to_native_string(location, 'utf8')
return None
def resolve_redirects(self, resp, req, stream=False, timeout=None,
verify=True, cert=None, proxies=None, yield_requests=False, **adapter_kwargs):
"""Receives a Response. Returns a generator of Responses or Requests."""
hist = [] # keep track of history
url = self.get_redirect_target(resp)
while url:
prepared_request = req.copy()
# Update history and keep track of redirects.
# resp.history must ignore the original request in this loop
hist.append(resp)
resp.history = hist[1:]
try:
resp.content # Consume socket so it can be released
except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
resp.raw.read(decode_content=False)
if len(resp.history) >= self.max_redirects:
raise TooManyRedirects('Exceeded %s redirects.' % self.max_redirects, response=resp)
# Release the connection back into the pool.
resp.close()
# Handle redirection without scheme (see: RFC 1808 Section 4)
if url.startswith('//'):
parsed_rurl = urlparse(resp.url)
url = '%s:%s' % (to_native_string(parsed_rurl.scheme), url)
# The scheme should be lower case...
parsed = urlparse(url)
url = parsed.geturl()
# Facilitate relative 'location' headers, as allowed by RFC 7231.
# (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
# Compliant with RFC3986, we percent encode the url.
if not parsed.netloc:
url = urljoin(resp.url, requote_uri(url))
else:
url = requote_uri(url)
prepared_request.url = to_native_string(url)
self.rebuild_method(prepared_request, resp)
# https://github.com/requests/requests/issues/1084
if resp.status_code not in (codes.temporary_redirect, codes.permanent_redirect):
# https://github.com/requests/requests/issues/3490
purged_headers = ('Content-Length', 'Content-Type', 'Transfer-Encoding')
for header in purged_headers:
prepared_request.headers.pop(header, None)
prepared_request.body = None
headers = prepared_request.headers
try:
del headers['Cookie']
except KeyError:
pass
# Extract any cookies sent on the response to the cookiejar
# in the new request. Because we've mutated our copied prepared
# request, use the old one that we haven't yet touched.
extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
merge_cookies(prepared_request._cookies, self.cookies)
prepared_request.prepare_cookies(prepared_request._cookies)
# Rebuild auth and proxy information.
proxies = self.rebuild_proxies(prepared_request, proxies)
self.rebuild_auth(prepared_request, resp)
# A failed tell() sets `_body_position` to `object()`. This non-None
# value ensures `rewindable` will be True, allowing us to raise an
# UnrewindableBodyError, instead of hanging the connection.
rewindable = (
prepared_request._body_position is not None and
('Content-Length' in headers or 'Transfer-Encoding' in headers)
)
# Attempt to rewind consumed file-like object.
if rewindable:
rewind_body(prepared_request)
# Override the original request.
req = prepared_request
if yield_requests:
yield req
else:
resp = self.send(
req,
stream=stream,
timeout=timeout,
verify=verify,
cert=cert,
proxies=proxies,
allow_redirects=False,
**adapter_kwargs
)
extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)
# extract redirect url, if any, for the next loop
url = self.get_redirect_target(resp)
yield resp
def rebuild_auth(self, prepared_request, response):
"""When being redirected we may want to strip authentication from the
request to avoid leaking credentials. This method intelligently removes
and reapplies authentication where possible to avoid credential loss.
"""
headers = prepared_request.headers
url = prepared_request.url
if 'Authorization' in headers:
# If we get redirected to a new host, we should strip out any
# authentication headers.
original_parsed = urlparse(response.request.url)
redirect_parsed = urlparse(url)
if (original_parsed.hostname != redirect_parsed.hostname):
del headers['Authorization']
# .netrc might have more auth for us on our new host.
new_auth = get_netrc_auth(url) if self.trust_env else None
if new_auth is not None:
prepared_request.prepare_auth(new_auth)
return
def rebuild_proxies(self, prepared_request, proxies):
"""This method re-evaluates the proxy configuration by considering the
environment variables. If we are redirected to a URL covered by
NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
proxy keys for this URL (in case they were stripped by a previous
redirect).
This method also replaces the Proxy-Authorization header where
necessary.
:rtype: dict
"""
proxies = proxies if proxies is not None else {}
headers = prepared_request.headers
url = prepared_request.url
scheme = urlparse(url).scheme
new_proxies = proxies.copy()
no_proxy = proxies.get('no_proxy')
bypass_proxy = should_bypass_proxies(url, no_proxy=no_proxy)
if self.trust_env and not bypass_proxy:
environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)
proxy = environ_proxies.get(scheme, environ_proxies.get('all'))
if proxy:
new_proxies.setdefault(scheme, proxy)
if 'Proxy-Authorization' in headers:
del headers['Proxy-Authorization']
try:
username, password = get_auth_from_url(new_proxies[scheme])
except KeyError:
username, password = None, None
if username and password:
headers['Proxy-Authorization'] = _basic_auth_str(username, password)
return new_proxies
def rebuild_method(self, prepared_request, response):
"""When being redirected we may want to change the method of the request
based on certain specs or browser behavior.
"""
method = prepared_request.method
# http://tools.ietf.org/html/rfc7231#section-6.4.4
if response.status_code == codes.see_other and method != 'HEAD':
method = 'GET'
# Do what the browsers do, despite standards...
# First, turn 302s into GETs.
if response.status_code == codes.found and method != 'HEAD':
method = 'GET'
# Second, if a POST is responded to with a 301, turn it into a GET.
# This bizarre behaviour is explained in Issue 1704.
if response.status_code == codes.moved and method == 'POST':
method = 'GET'
prepared_request.method = method
class Session(SessionRedirectMixin):
"""A Requests session.
Provides cookie persistence, connection-pooling, and configuration.
Basic Usage::
>>> import requests
>>> s = requests.Session()
>>> s.get('http://httpbin.org/get')
<Response [200]>
Or as a context manager::
>>> with requests.Session() as s:
>>> s.get('http://httpbin.org/get')
<Response [200]>
"""
__attrs__ = [
'headers', 'cookies', 'auth', 'proxies', 'hooks', 'params', 'verify',
'cert', 'prefetch', 'adapters', 'stream', 'trust_env',
'max_redirects',
]
def __init__(self):
#: A case-insensitive dictionary of headers to be sent on each
#: :class:`Request <Request>` sent from this
#: :class:`Session <Session>`.
self.headers = default_headers()
#: Default Authentication tuple or object to attach to
#: :class:`Request <Request>`.
self.auth = None
#: Dictionary mapping protocol or protocol and host to the URL of the proxy
#: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
#: be used on each :class:`Request <Request>`.
self.proxies = {}
#: Event-handling hooks.
self.hooks = default_hooks()
#: Dictionary of querystring data to attach to each
#: :class:`Request <Request>`. The dictionary values may be lists for
#: representing multivalued query parameters.
self.params = {}
#: Stream response content default.
self.stream = False
#: SSL Verification default.
self.verify = True
#: SSL client certificate default, if String, path to ssl client
#: cert file (.pem). If Tuple, ('cert', 'key') pair.
self.cert = None
#: Maximum number of redirects allowed. If the request exceeds this
#: limit, a :class:`TooManyRedirects` exception is raised.
#: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
#: 30.
self.max_redirects = DEFAULT_REDIRECT_LIMIT
#: Trust environment settings for proxy configuration, default
#: authentication and similar.
self.trust_env = True
#: A CookieJar containing all currently outstanding cookies set on this
#: session. By default it is a
#: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
#: may be any other ``cookielib.CookieJar`` compatible object.
self.cookies = cookiejar_from_dict({})
# Default connection adapters.
self.adapters = OrderedDict()
self.mount('https://', HTTPAdapter())
self.mount('http://', HTTPAdapter())
def __enter__(self):
return self
def __exit__(self, *args):
self.close()
def prepare_request(self, request):
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
transmission and returns it. The :class:`PreparedRequest` has settings
merged from the :class:`Request <Request>` instance and those of the
:class:`Session`.
:param request: :class:`Request` instance to prepare with this
session's settings.
:rtype: requests.PreparedRequest
"""
cookies = request.cookies or {}
# Bootstrap CookieJar.
if not isinstance(cookies, cookielib.CookieJar):
cookies = cookiejar_from_dict(cookies)
# Merge with session cookies
merged_cookies = merge_cookies(
merge_cookies(RequestsCookieJar(), self.cookies), cookies)
# Set environment's basic authentication if not explicitly set.
auth = request.auth
if self.trust_env and not auth and not self.auth:
auth = get_netrc_auth(request.url)
p = PreparedRequest()
p.prepare(
method=request.method.upper(),
url=request.url,
files=request.files,
data=request.data,
json=request.json,
headers=merge_setting(request.headers, self.headers, dict_class=CaseInsensitiveDict),
params=merge_setting(request.params, self.params),
auth=merge_setting(auth, self.auth),
cookies=merged_cookies,
hooks=merge_hooks(request.hooks, self.hooks),
)
return p
def request(self, method, url,
params=None, data=None, headers=None, cookies=None, files=None,
auth=None, timeout=None, allow_redirects=True, proxies=None,
hooks=None, stream=None, verify=None, cert=None, json=None):
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
Returns :class:`Response <Response>` object.
:param method: method for the new :class:`Request` object.
:param url: URL for the new :class:`Request` object.
:param params: (optional) Dictionary or bytes to be sent in the query
string for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send
in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the
:class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the
:class:`Request`.
:param cookies: (optional) Dict or CookieJar object to send with the
:class:`Request`.
:param files: (optional) Dictionary of ``'filename': file-like-objects``
for multipart encoding upload.
:param auth: (optional) Auth tuple or callable to enable
Basic/Digest/Custom HTTP Auth.
:param timeout: (optional) How long to wait for the server to send
data before giving up, as a float, or a :ref:`(connect timeout,
read timeout) <timeouts>` tuple.
:type timeout: float or tuple
:param allow_redirects: (optional) Set to True by default.
:type allow_redirects: bool
:param proxies: (optional) Dictionary mapping protocol or protocol and
hostname to the URL of the proxy.
:param stream: (optional) whether to immediately download the response
content. Defaults to ``False``.
:param verify: (optional) Either a boolean, in which case it controls whether we verify
the server's TLS certificate, or a string, in which case it must be a path
to a CA bundle to use. Defaults to ``True``.
:param cert: (optional) if String, path to ssl client cert file (.pem).
If Tuple, ('cert', 'key') pair.
:rtype: requests.Response
"""
# Create the Request.
req = Request(
method=method.upper(),
url=url,
headers=headers,
files=files,
data=data or {},
json=json,
params=params or {},
auth=auth,
cookies=cookies,
hooks=hooks,
)
prep = self.prepare_request(req)
proxies = proxies or {}
settings = self.merge_environment_settings(
prep.url, proxies, stream, verify, cert
)
# Send the request.
send_kwargs = {
'timeout': timeout,
'allow_redirects': allow_redirects,
}
send_kwargs.update(settings)
resp = self.send(prep, **send_kwargs)
return resp
def get(self, url, **kwargs):
r"""Sends a GET request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('GET', url, **kwargs)
def options(self, url, **kwargs):
r"""Sends a OPTIONS request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', True)
return self.request('OPTIONS', url, **kwargs)
def head(self, url, **kwargs):
r"""Sends a HEAD request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
kwargs.setdefault('allow_redirects', False)
return self.request('HEAD', url, **kwargs)
def post(self, url, data=None, json=None, **kwargs):
r"""Sends a POST request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('POST', url, data=data, json=json, **kwargs)
def put(self, url, data=None, **kwargs):
r"""Sends a PUT request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PUT', url, data=data, **kwargs)
def patch(self, url, data=None, **kwargs):
r"""Sends a PATCH request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body of the :class:`Request`.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('PATCH', url, data=data, **kwargs)
def delete(self, url, **kwargs):
r"""Sends a DELETE request. Returns :class:`Response` object.
:param url: URL for the new :class:`Request` object.
:param \*\*kwargs: Optional arguments that ``request`` takes.
:rtype: requests.Response
"""
return self.request('DELETE', url, **kwargs)
def send(self, request, **kwargs):
"""Send a given PreparedRequest.
:rtype: requests.Response
"""
# Set defaults that the hooks can utilize to ensure they always have
# the correct parameters to reproduce the previous request.
kwargs.setdefault('stream', self.stream)
kwargs.setdefault('verify', self.verify)
kwargs.setdefault('cert', self.cert)
kwargs.setdefault('proxies', self.proxies)
# It's possible that users might accidentally send a Request object.
# Guard against that specific failure case.
if isinstance(request, Request):
raise ValueError('You can only send PreparedRequests.')
# Set up variables needed for resolve_redirects and dispatching of hooks
allow_redirects = kwargs.pop('allow_redirects', True)
stream = kwargs.get('stream')
hooks = request.hooks
# Get the appropriate adapter to use
adapter = self.get_adapter(url=request.url)
# Start time (approximately) of the request
start = preferred_clock()
# Send the request
r = adapter.send(request, **kwargs)
# Total elapsed time of the request (approximately)
elapsed = preferred_clock() - start
r.elapsed = timedelta(seconds=elapsed)
# Response manipulation hooks
r = dispatch_hook('response', hooks, r, **kwargs)
# Persist cookies
if r.history:
# If the hooks create history then we want those cookies too
for resp in r.history:
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
extract_cookies_to_jar(self.cookies, request, r.raw)
# Redirect resolving generator.
gen = self.resolve_redirects(r, request, **kwargs)
# Resolve redirects if allowed.
history = [resp for resp in gen] if allow_redirects else []
# Shuffle things around if there's history.
if history:
# Insert the first (original) request at the start
history.insert(0, r)
# Get the last request made
r = history.pop()
r.history = history
# If redirects aren't being followed, store the response on the Request for Response.next().
if not allow_redirects:
try:
r._next = next(self.resolve_redirects(r, request, yield_requests=True, **kwargs))
except StopIteration:
pass
if not stream:
r.content
return r
def merge_environment_settings(self, url, proxies, stream, verify, cert):
"""
Check the environment and merge it with some settings.
:rtype: dict
"""
# Gather clues from the surrounding environment.
if self.trust_env:
# Set environment's proxies.
no_proxy = proxies.get('no_proxy') if proxies is not None else None
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
for (k, v) in env_proxies.items():
proxies.setdefault(k, v)
# Look for requests environment configuration and be compatible
# with cURL.
if verify is True or verify is None:
verify = (os.environ.get('REQUESTS_CA_BUNDLE') or
os.environ.get('CURL_CA_BUNDLE'))
# Merge all the kwargs.
proxies = merge_setting(proxies, self.proxies)
stream = merge_setting(stream, self.stream)
verify = merge_setting(verify, self.verify)
cert = merge_setting(cert, self.cert)
return {'verify': verify, 'proxies': proxies, 'stream': stream,
'cert': cert}
def get_adapter(self, url):
"""
Returns the appropriate connection adapter for the given URL.
:rtype: requests.adapters.BaseAdapter
"""
for (prefix, adapter) in self.adapters.items():
if url.lower().startswith(prefix):
return adapter
# Nothing matches :-/
raise InvalidSchema("No connection adapters were found for '%s'" % url)
def close(self):
"""Closes all adapters and as such the session"""
for v in self.adapters.values():
v.close()
def mount(self, prefix, adapter):
"""Registers a connection adapter to a prefix.
Adapters are sorted in descending order by key length.
"""
self.adapters[prefix] = adapter
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
for key in keys_to_move:
self.adapters[key] = self.adapters.pop(key)
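# Illustrative example (hypothetical URL and adapter, not from the original
# source): after s.mount('https://api.example.com/', SomeAdapter()), that
# adapter is tried before the default 'https://' one, because get_adapter()
# returns the first matching prefix and mount() keeps longer prefixes first.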
def __getstate__(self):
state = dict((attr, getattr(self, attr, None)) for attr in self.__attrs__)
return state
def __setstate__(self, state):
for attr, value in state.items():
setattr(self, attr, value)
def session():
"""
Returns a :class:`Session` for context-management.
:rtype: Session
"""
return Session()
| gpl-3.0 |
40423152/2017springcd_hw | plugin/liquid_tags/mdx_liquid_tags.py | 281 | 3447 | """
Markdown Extension for Liquid-style Tags
----------------------------------------
A markdown extension to allow user-defined tags of the form::
{% tag arg1 arg2 ... argn %}
Where "tag" is associated with some user-defined extension.
These result in a preprocess step within markdown that produces
either markdown or html.
"""
import warnings
import markdown
import itertools
import re
import os
from functools import wraps
# Define some regular expressions
LIQUID_TAG = re.compile(r'\{%.*?%\}', re.MULTILINE | re.DOTALL)
EXTRACT_TAG = re.compile(r'(?:\s*)(\S+)(?:\s*)')
LT_CONFIG = { 'CODE_DIR': 'code',
'NOTEBOOK_DIR': 'notebooks',
'FLICKR_API_KEY': 'flickr',
'GIPHY_API_KEY': 'giphy'
}
LT_HELP = { 'CODE_DIR' : 'Code directory for include_code subplugin',
'NOTEBOOK_DIR' : 'Notebook directory for notebook subplugin',
'FLICKR_API_KEY': 'Flickr key for accessing the API',
'GIPHY_API_KEY': 'Giphy key for accessing the API'
}
class _LiquidTagsPreprocessor(markdown.preprocessors.Preprocessor):
_tags = {}
def __init__(self, configs):
self.configs = configs
def run(self, lines):
page = '\n'.join(lines)
liquid_tags = LIQUID_TAG.findall(page)
for i, markup in enumerate(liquid_tags):
# remove {% %}
markup = markup[2:-2]
tag = EXTRACT_TAG.match(markup).groups()[0]
markup = EXTRACT_TAG.sub('', markup, 1)
if tag in self._tags:
liquid_tags[i] = self._tags[tag](self, tag, markup.strip())
# add an empty string to liquid_tags so that chaining works
liquid_tags.append('')
# reconstruct string
page = ''.join(itertools.chain(*zip(LIQUID_TAG.split(page),
liquid_tags)))
# resplit the lines
return page.split("\n")
class LiquidTags(markdown.Extension):
"""Wrapper for MDPreprocessor"""
def __init__(self, config):
try:
# Needed for markdown versions >= 2.5
for key,value in LT_CONFIG.items():
self.config[key] = [value,LT_HELP[key]]
super(LiquidTags,self).__init__(**config)
except AttributeError:
# Markdown versions < 2.5
for key,value in LT_CONFIG.items():
config[key] = [config[key],LT_HELP[key]]
super(LiquidTags,self).__init__(config)
@classmethod
def register(cls, tag):
"""Decorator to register a new include tag"""
def dec(func):
if tag in _LiquidTagsPreprocessor._tags:
warnings.warn("Enhanced Markdown: overriding tag '%s'" % tag)
_LiquidTagsPreprocessor._tags[tag] = func
return func
return dec
def extendMarkdown(self, md, md_globals):
self.htmlStash = md.htmlStash
md.registerExtension(self)
# for the include_code preprocessor, we need to re-run the
# fenced code block preprocessor after substituting the code.
# Because the fenced code processor is run before, {% %} tags
# within equations will not be parsed as an include.
md.preprocessors.add('mdincludes',
_LiquidTagsPreprocessor(self), ">html_block")
def makeExtension(configs=None):
"""Wrapper for a MarkDown extension"""
return LiquidTags(configs=configs)
| agpl-3.0 |
dquartul/BLonD | blond/monitors/monitors.py | 2 | 20935 |
# Copyright 2016 CERN. This software is distributed under the
# terms of the GNU General Public Licence version 3 (GPL Version 3),
# copied verbatim in the file LICENCE.md.
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization or
# submit itself to any jurisdiction.
# Project website: http://blond.web.cern.ch/
'''
**Module to save beam statistics in h5 files**
:Authors: **Danilo Quartullo**, **Helga Timko**
'''
from builtins import object
import h5py as hp
import numpy as np
class BunchMonitor(object):
''' Class able to save bunch data into an h5 file. Use 'buffer_time' to set
how often (in number of turns) the buffered data are written to file.
If a Profile object is passed to the constructor, the Gaussian-fit bunch
length is saved as well (the Profile object must then have its fit_option
set to 'gaussian').
'''
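# Illustrative use (assumed object names, not from the original source):
#
#     monitor = BunchMonitor(ring, rf_station, beam, 'output/bunch',
#                            buffer_time=100, Profile=profile)
#     for turn in range(ring.n_turns):
#         tracker.track()
#         monitor.track()
#
# Data are flushed to 'output/bunch.h5' every buffer_time turns.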
def __init__(self, Ring, RFParameters, Beam, filename,
buffer_time=None,
Profile=None, PhaseLoop=None, LHCNoiseFB=None):
self.filename = filename
self.n_turns = Ring.n_turns
self.i_turn = 0
self.buffer_time = buffer_time
if buffer_time == None:
self.buffer_time = self.n_turns
self.rf_params = RFParameters
self.beam = Beam
self.profile = Profile
if self.profile:
if self.profile.fit_option != None:
self.fit_option = True
else:
self.fit_option = False
else:
self.fit_option = False
self.PL = PhaseLoop
self.LHCNoiseFB = LHCNoiseFB
# Initialise data and save initial state
self.init_data(self.filename, (self.n_turns + 1,))
# Track at initialisation
self.track()
def track(self):
self.beam.statistics()
# Write buffer with i_turn = RFcounter - 1
self.write_buffer()
# Synchronise to i_turn = RFcounter
self.i_turn += 1
if self.i_turn > 0 and (self.i_turn % self.buffer_time) == 0:
self.open()
self.write_data(self.h5file['Beam'], (self.n_turns + 1,))
self.close()
self.init_buffer()
def init_data(self, filename, dims):
# Prepare data
self.beam.statistics()
# Open file
self.h5file = hp.File(filename + '.h5', 'w')
self.h5file.require_group('Beam')
# Create datasets and write first data points
h5group = self.h5file['Beam']
h5group.create_dataset("n_macroparticles_alive", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["n_macroparticles_alive"][0] = self.beam.n_macroparticles_alive
h5group.create_dataset("mean_dt", shape=dims, dtype='f',
compression="gzip", compression_opts=9)
h5group["mean_dt"][0] = self.beam.mean_dt
h5group.create_dataset("mean_dE", shape=dims, dtype='f',
compression="gzip", compression_opts=9)
h5group["mean_dE"][0] = self.beam.mean_dE
h5group.create_dataset("sigma_dt", shape=dims, dtype='f',
compression="gzip", compression_opts=9)
h5group["sigma_dt"][0] = self.beam.sigma_dt
h5group.create_dataset("sigma_dE", shape=dims, dtype='f',
compression="gzip", compression_opts=9)
h5group["sigma_dE"][0] = self.beam.sigma_dE
h5group.create_dataset("epsn_rms_l", shape=dims, dtype='f',
compression="gzip", compression_opts=9)
h5group["epsn_rms_l"][0] = self.beam.epsn_rms_l
if self.fit_option == True:
h5group.create_dataset("bunch_length", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["bunch_length"][0] = self.profile.bunchLength
if self.PL:
h5group.create_dataset("PL_omegaRF", shape=dims,
dtype=np.float64,
compression="gzip", compression_opts=9)
h5group["PL_omegaRF"][0] = self.rf_params.omega_rf[0, 0]
h5group.create_dataset("PL_phiRF", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["PL_phiRF"][0] = self.rf_params.phi_rf[0, 0]
h5group.create_dataset("PL_bunch_phase", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["PL_bunch_phase"][0] = self.PL.phi_beam
h5group.create_dataset("PL_phase_corr", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["PL_phase_corr"][0] = self.PL.dphi
h5group.create_dataset("PL_omegaRF_corr", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["PL_omegaRF_corr"][0] = self.PL.domega_rf
h5group.create_dataset("SL_dphiRF", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["SL_dphiRF"][0] = self.rf_params.dphi_rf[0]
h5group.create_dataset("RL_drho", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["RL_drho"][0] = self.PL.drho
if self.LHCNoiseFB:
h5group.create_dataset("LHC_noise_FB_factor", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["LHC_noise_FB_factor"][0] = self.LHCNoiseFB.x
h5group.create_dataset("LHC_noise_FB_bl", shape=dims,
dtype='f',
compression="gzip", compression_opts=9)
h5group["LHC_noise_FB_bl"][0] = self.LHCNoiseFB.bl_meas
if self.LHCNoiseFB.bl_meas_bbb != None:
h5group.create_dataset("LHC_noise_FB_bl_bbb",
shape=(self.n_turns + 1,
len(self.LHCNoiseFB.bl_meas_bbb)),
dtype='f', compression="gzip",
compression_opts=9)
h5group["LHC_noise_FB_bl_bbb"][0,
:] = self.LHCNoiseFB.bl_meas_bbb[:]
# Close file
self.close()
# Initialise buffer for next turn
self.init_buffer()
def init_buffer(self):
self.b_np_alive = np.zeros(self.buffer_time)
self.b_mean_dt = np.zeros(self.buffer_time)
self.b_mean_dE = np.zeros(self.buffer_time)
self.b_sigma_dt = np.zeros(self.buffer_time)
self.b_sigma_dE = np.zeros(self.buffer_time)
self.b_epsn_rms = np.zeros(self.buffer_time)
if self.fit_option == True:
self.b_bl = np.zeros(self.buffer_time)
if self.PL:
self.b_PL_omegaRF = np.zeros(self.buffer_time)
self.b_PL_phiRF = np.zeros(self.buffer_time)
self.b_PL_bunch_phase = np.zeros(self.buffer_time)
self.b_PL_phase_corr = np.zeros(self.buffer_time)
self.b_PL_omegaRF_corr = np.zeros(self.buffer_time)
self.b_SL_dphiRF = np.zeros(self.buffer_time)
self.b_RL_drho = np.zeros(self.buffer_time)
if self.LHCNoiseFB:
self.b_LHCnoiseFB_factor = np.zeros(self.buffer_time)
self.b_LHCnoiseFB_bl = np.zeros(self.buffer_time)
if self.LHCNoiseFB.bl_meas_bbb != None:
self.b_LHCnoiseFB_bl_bbb = np.zeros((self.buffer_time,
len(self.LHCNoiseFB.bl_meas_bbb)))
def write_buffer(self):
i = self.i_turn % self.buffer_time
self.b_np_alive[i] = self.beam.n_macroparticles_alive
self.b_mean_dt[i] = self.beam.mean_dt
self.b_mean_dE[i] = self.beam.mean_dE
self.b_sigma_dt[i] = self.beam.sigma_dt
self.b_sigma_dE[i] = self.beam.sigma_dE
self.b_epsn_rms[i] = self.beam.epsn_rms_l
if self.fit_option == True:
self.b_bl[i] = self.profile.bunchLength
if self.PL:
self.b_PL_omegaRF[i] = self.rf_params.omega_rf[0, self.i_turn]
self.b_PL_phiRF[i] = self.rf_params.phi_rf[0, self.i_turn]
self.b_PL_bunch_phase[i] = self.PL.phi_beam
self.b_PL_phase_corr[i] = self.PL.dphi
self.b_PL_omegaRF_corr[i] = self.PL.domega_rf
self.b_SL_dphiRF[i] = self.rf_params.dphi_rf[0]
self.b_RL_drho[i] = self.PL.drho
if self.LHCNoiseFB:
self.b_LHCnoiseFB_factor[i] = self.LHCNoiseFB.x
self.b_LHCnoiseFB_bl[i] = self.LHCNoiseFB.bl_meas
if self.LHCNoiseFB.bl_meas_bbb != None:
self.b_LHCnoiseFB_bl_bbb[i, :] = self.LHCNoiseFB.bl_meas_bbb[:]
def write_data(self, h5group, dims):
i1 = self.i_turn - self.buffer_time
i2 = self.i_turn
h5group.require_dataset("n_macroparticles_alive", shape=dims,
dtype='f')
h5group["n_macroparticles_alive"][i1:i2] = self.b_np_alive[:]
h5group.require_dataset("mean_dt", shape=dims, dtype='f')
h5group["mean_dt"][i1:i2] = self.b_mean_dt[:]
h5group.require_dataset("mean_dE", shape=dims, dtype='f')
h5group["mean_dE"][i1:i2] = self.b_mean_dE[:]
h5group.require_dataset("sigma_dt", shape=dims, dtype='f')
h5group["sigma_dt"][i1:i2] = self.b_sigma_dt[:]
h5group.require_dataset("sigma_dE", shape=dims, dtype='f')
h5group["sigma_dE"][i1:i2] = self.b_sigma_dE[:]
h5group.require_dataset("epsn_rms_l", shape=dims, dtype='f')
h5group["epsn_rms_l"][i1:i2] = self.b_epsn_rms[:]
if self.fit_option == True:
h5group.require_dataset("bunch_length", shape=dims,
dtype='f')
h5group["bunch_length"][i1:i2] = self.b_bl[:]
if self.PL:
h5group.require_dataset("PL_omegaRF", shape=dims,
dtype=np.float64)
h5group["PL_omegaRF"][i1:i2] = self.b_PL_omegaRF[:]
h5group.require_dataset("PL_phiRF", shape=dims,
dtype='f')
h5group["PL_phiRF"][i1:i2] = self.b_PL_phiRF[:]
h5group.require_dataset("PL_bunch_phase", shape=dims,
dtype='f')
h5group["PL_bunch_phase"][i1:i2] = self.b_PL_bunch_phase[:]
h5group.require_dataset("PL_phase_corr", shape=dims,
dtype='f')
h5group["PL_phase_corr"][i1:i2] = self.b_PL_phase_corr[:]
h5group.require_dataset("PL_omegaRF_corr", shape=dims,
dtype='f')
h5group["PL_omegaRF_corr"][i1:i2] = self.b_PL_omegaRF_corr[:]
h5group.require_dataset("SL_dphiRF", shape=dims,
dtype='f')
h5group["SL_dphiRF"][i1:i2] = self.b_SL_dphiRF[:]
h5group.require_dataset("RL_drho", shape=dims,
dtype='f')
h5group["RL_drho"][i1:i2] = self.b_RL_drho[:]
if self.LHCNoiseFB:
h5group.require_dataset("LHC_noise_FB_factor", shape=dims,
dtype='f')
h5group["LHC_noise_FB_factor"][i1:i2] = self.b_LHCnoiseFB_factor[:]
h5group.require_dataset("LHC_noise_FB_bl", shape=dims,
dtype='f')
h5group["LHC_noise_FB_bl"][i1:i2] = self.b_LHCnoiseFB_bl[:]
if self.LHCNoiseFB.bl_meas_bbb != None:
h5group.require_dataset("LHC_noise_FB_bl_bbb", shape=(self.n_turns + 1,
len(self.LHCNoiseFB.bl_meas_bbb)),
dtype='f')
h5group["LHC_noise_FB_bl_bbb"][i1:i2,
:] = self.b_LHCnoiseFB_bl_bbb[:, :]
def open(self):
self.h5file = hp.File(self.filename + '.h5', 'r+')
self.h5file.require_group('Beam')
def close(self):
self.h5file.close()
class SlicesMonitor(object):
''' Class able to save the bunch profile, i.e. the histogram derived from
the slicing.
'''
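# Illustrative use (assumed object names, not from the original source):
#
#     slices_mon = SlicesMonitor('output/profile', n_turns, profile)
#     for turn in range(n_turns):
#         profile.track()
#         slices_mon.track(beam)
#     slices_mon.close()
#
# Each call stores profile.n_macroparticles as one column of the
# 'Slices/n_macroparticles' dataset.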
def __init__(self, filename, n_turns, profile):
self.h5file = hp.File(filename + '.h5', 'w')
self.n_turns = n_turns
self.i_turn = 0
self.profile = profile
self.h5file.create_group('Slices')
def track(self, bunch):
if not self.i_turn:
self.create_data(self.h5file['Slices'], (self.profile.n_slices,
self.n_turns))
self.write_data(self.profile, self.h5file['Slices'], self.i_turn)
else:
self.write_data(self.profile, self.h5file['Slices'], self.i_turn)
self.i_turn += 1
def create_data(self, h5group, dims):
h5group.create_dataset("n_macroparticles", dims, compression="gzip",
compression_opts=9)
def write_data(self, bunch, h5group, i_turn):
h5group["n_macroparticles"][:, i_turn] = self.profile.n_macroparticles
def close(self):
self.h5file.close()
class MultiBunchMonitor(object):
''' Class able to save the multi-bunch profile, i.e. the histogram derived
from the slicing.
'''
def __init__(self, filename, n_turns, profile, rf, Nbunches, buffer_size=100):
self.h5file = hp.File(filename + '.h5', 'w')
self.n_turns = n_turns
self.i_turn = 0
self.profile = profile
self.rf = rf
self.beam = self.profile.Beam
self.h5file.create_group('default')
self.h5group = self.h5file['default']
self.Nbunches = Nbunches
self.buffer_size = buffer_size
self.last_save = 0
self.create_data('profile', self.h5file['default'],
(self.n_turns, self.profile.n_slices), dtype='int32')
self.b_profile = np.zeros(
(self.buffer_size, self.profile.n_slices), dtype='int32')
self.create_data(
'turns', self.h5file['default'], (self.n_turns, ), dtype='int32')
self.b_turns = np.zeros(self.buffer_size, dtype='int32')
self.create_data('losses', self.h5file['default'],
(self.n_turns, ), dtype='int')
self.b_losses = np.zeros(
(self.buffer_size, ), dtype='int32')
self.create_data('fwhm_bunch_position', self.h5file['default'],
(self.n_turns, self.Nbunches), dtype='float64')
self.b_fwhm_bunch_position = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.create_data('fwhm_bunch_length', self.h5file['default'],
(self.n_turns, self.Nbunches), dtype='float64')
self.b_fwhm_bunch_length = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
if self.Nbunches == 1:
            # All these can be calculated only for a single bunch
self.create_data(
'mean_dE', self.h5file['default'], (
self.n_turns, self.Nbunches),
dtype='float64')
self.create_data(
'dE_norm', self.h5file['default'], (
self.n_turns, self.Nbunches),
dtype='float64')
self.create_data(
'mean_dt', self.h5file['default'], (
self.n_turns, self.Nbunches),
dtype='float64')
self.create_data(
'dt_norm', self.h5file['default'], (
self.n_turns, self.Nbunches),
dtype='float64')
self.create_data(
'std_dE', self.h5file['default'], (self.n_turns, self.Nbunches),
dtype='float64')
self.create_data(
'std_dt', self.h5file['default'], (self.n_turns, self.Nbunches),
dtype='float64')
self.b_mean_dE = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.b_mean_dt = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.b_dE_norm = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.b_dt_norm = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.b_std_dE = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
self.b_std_dt = np.zeros(
(self.buffer_size, self.Nbunches), dtype=float)
def __del__(self):
if self.i_turn > self.last_save:
self.write_data()
# self.h5file.close()
def write_buffer(self, turn):
# Nppb = int(self.profile.Beam.n_macroparticles // self.Nbunches)
# mean_dE = np.zeros(self.Nbunches, dtype=float)
# mean_dt = np.zeros(self.Nbunches, dtype=float)
# std_dE = np.zeros(self.Nbunches, dtype=float)
# std_dt = np.zeros(self.Nbunches, dtype=float)
# for i in range(self.Nbunches):
# mean_dE[i] = np.mean(self.profile.Beam.dE[i*Nppb:(i+1)*Nppb])
# mean_dt[i] = np.mean(self.profile.Beam.dt[i*Nppb:(i+1)*Nppb])
# std_dE[i] = np.std(self.profile.Beam.dE[i*Nppb:(i+1)*Nppb])
# std_dt[i] = np.std(self.profile.Beam.dt[i*Nppb:(i+1)*Nppb])
idx = self.i_turn % self.buffer_size
self.b_turns[idx] = turn
self.b_profile[idx] = self.profile.n_macroparticles.astype(np.int32)
self.b_losses[idx] = self.beam.losses
self.b_fwhm_bunch_position[idx] = self.profile.bunchPosition
self.b_fwhm_bunch_length[idx] = self.profile.bunchLength
if self.Nbunches == 1:
self.b_mean_dE[idx] = self.beam.mean_dE
self.b_mean_dt[idx] = self.beam.mean_dt
self.b_std_dE[idx] = self.beam.sigma_dE
self.b_std_dt[idx] = self.beam.sigma_dt
self.b_dE_norm[idx] = self.rf.voltage[0, turn]
if turn == 0:
self.b_dt_norm[idx] = self.rf.t_rev[0] * self.rf.eta_0[0] * \
self.rf.voltage[0, 0] / \
(self.rf.beta[0]**2 * self.rf.energy[0])
else:
self.b_dt_norm[idx] = self.rf.t_rev[turn] * self.rf.eta_0[turn] * \
self.rf.voltage[0, turn-1] / \
(self.rf.beta[turn]**2 * self.rf.energy[turn])
def write_data(self):
i1_h5 = self.last_save
i2_h5 = self.i_turn
i1_b = 0
i2_b = self.i_turn - self.last_save
# print("i1_h5, i2_h5:{}-{}".format(i1_h5, i2_h5))
self.last_save = self.i_turn
self.h5group['turns'][i1_h5:i2_h5] = self.b_turns[i1_b:i2_b]
self.h5group['profile'][i1_h5:i2_h5] = self.b_profile[i1_b:i2_b]
self.h5group['losses'][i1_h5:i2_h5] = self.b_losses[i1_b:i2_b]
self.h5group['fwhm_bunch_position'][i1_h5:i2_h5] = self.b_fwhm_bunch_position[i1_b:i2_b]
self.h5group['fwhm_bunch_length'][i1_h5:i2_h5] = self.b_fwhm_bunch_length[i1_b:i2_b]
if self.Nbunches == 1:
self.h5group['mean_dE'][i1_h5:i2_h5] = self.b_mean_dE[i1_b:i2_b]
self.h5group['dE_norm'][i1_h5:i2_h5] = self.b_dE_norm[i1_b:i2_b]
self.h5group['dt_norm'][i1_h5:i2_h5] = self.b_dt_norm[i1_b:i2_b]
self.h5group['mean_dt'][i1_h5:i2_h5] = self.b_mean_dt[i1_b:i2_b]
self.h5group['std_dE'][i1_h5:i2_h5] = self.b_std_dE[i1_b:i2_b]
self.h5group['std_dt'][i1_h5:i2_h5] = self.b_std_dt[i1_b:i2_b]
def track(self, turn):
self.write_buffer(turn)
self.i_turn += 1
if (self.i_turn > 0) and (self.i_turn % self.buffer_size == 0):
self.write_data()
def create_data(self, name, h5group, dims, dtype):
h5group.create_dataset(name, dims, compression='gzip',
compression_opts=4, dtype=dtype, shuffle=True)
def close(self):
if self.i_turn > self.last_save:
self.write_data()
self.h5file.close()
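# Usage sketch (illustrative, not from the original source): MultiBunchMonitor
# keeps `buffer_size` turns in memory and flushes them in write_data(), so
# close() (or object deletion) is needed to persist the last partial buffer.
# `profile` and `rf` stand for the simulation's Profile and RFStation objects
# and are assumptions here.
#
#   monitor = MultiBunchMonitor('mb_output', n_turns, profile, rf, Nbunches=1,
#                               buffer_size=100)
#   for turn in range(n_turns):
#       # ... tracking code updates profile, rf and the beam ...
#       monitor.track(turn)    # flushes automatically every buffer_size turns
#   monitor.close()            # writes whatever is left in the buffer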
| gpl-3.0 |
alex-pardo/ANLP-PROJECT | parseCities.py | 1 | 3827 |
import re
from string import *
import sys
from nltk import *
import locale
from wikitools import wiki
from wikitools import api
from wikitools import page
from wikitools import category
wikiAPI = {
'en': "http://en.wikipedia.org/w/api.php"}
site = wiki.Wiki(wikiAPI['en'])
## City names scraped from the Wikipedia "List of cities in ..." pages
cities = []
rule = re.compile(u'.*\[\[([\w\s]+)\]\].*',re.L)
r1 = re.compile(r'.*\[\[((List of )([A-Za-z]{1,}[\s]?)+)\]\].*')
r2 = re.compile(r'.*\[\[([A-Z]{1}([a-z]{1,}[\s]*)+)\]\].*')
lists = ['List_of_cities_in_Africa', 'List_of_cities_in_Asia', 'List_of_cities_in_Oceania', 'List_of_cities_in_Europe']
#lists = ['List_of_cities_in_Europe']
for l in lists:
p = page.Page(site, l, sectionnumber='1')
for line in p.getWikiText().split('\n'):
tmp = r1.findall(line)
if len(tmp) > 0:
link = tmp[0][0]
print link.encode('utf-8')
sc = page.Page(site, link, sectionnumber='1')
try:
text = sc.getWikiText().split('\n')
except:
continue
text = map(lambda x:guess_encoding(x)[0],text)
#print text
for line in text:
if 'ref' in line:
continue
try:
#print rule.match(line).group()[0]
tmp = rule.findall(line)
if len(tmp) > 0:
if tmp[0] not in cities:
if len(tmp[0].split(' ')) < 2:
cities.append(tmp[0])
except Exception, e:
pass
# m = re.match(rule,line)
# print m.group()[0]
print len(cities)
with open("cities.csv", 'w') as f:
for city in cities:
        f.write((city + u'\n').encode('utf-8'))
## For detecting the encoding and transforming to Unicode
##########################################################################
# Guess Character Encoding
##########################################################################
# adapted from io.py in the docutils extension module (http://docutils.sourceforge.net)
# http://www.pyzine.com/Issue008/Section_Articles/article_Encodings.html
def guess_encoding(data):
"""
Given a byte string, attempt to decode it.
Tries the standard 'UTF8' and 'latin-1' encodings,
Plus several gathered from locale information.
The calling program *must* first call::
locale.setlocale(locale.LC_ALL, '')
If successful it returns C{(decoded_unicode, successful_encoding)}.
If unsuccessful it raises a C{UnicodeError}.
"""
successful_encoding = None
# we make 'utf-8' the first encoding
encodings = ['utf-8']
#
# next we add anything we can learn from the locale
try:
encodings.append(locale.nl_langinfo(locale.CODESET))
except AttributeError:
pass
try:
encodings.append(locale.getlocale()[1])
except (AttributeError, IndexError):
pass
try:
encodings.append(locale.getdefaultlocale()[1])
except (AttributeError, IndexError):
pass
#
# we try 'latin-1' last
encodings.append('latin-1')
for enc in encodings:
# some of the locale calls
# may have returned None
if not enc:
continue
try:
decoded = unicode(data, enc)
successful_encoding = enc
except (UnicodeError, LookupError):
pass
else:
break
if not successful_encoding:
raise UnicodeError(
'Unable to decode input data. Tried the following encodings: %s.'
% ', '.join([repr(enc) for enc in encodings if enc]))
else:
return (decoded, successful_encoding)
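## Illustrative example (not in the original script): guess_encoding() expects
## the caller to set the locale first, as its docstring notes. The sample byte
## string below is an assumption used purely for demonstration.
##
## locale.setlocale(locale.LC_ALL, '')
## decoded, enc = guess_encoding('Z\xfcrich') # latin-1 bytes
## # decoded == u'Z\xfcrich'; enc is whichever encoding succeeded, e.g. 'latin-1'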
| apache-2.0 |
mohsenmalmir/DeepLearningStack | DeepLearningStack/ConvNet/LRN.py | 1 | 1984 | import os
import sys
import time
import numpy as np
import theano
import theano.tensor as T
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.sandbox.cuda.basic_ops import gpu_contiguous
from pylearn2.sandbox.cuda_convnet.pool import MaxPool
from pylearn2.sandbox.cuda_convnet.response_norm import CrossMapNorm
# implementing local response normalization
class LRN(object):
""" Initialize from xml definition node """
def __init__(self,layer_def,input,input_shape,rs,clone_from=None):
"""
Create a local response normalization layer, using crossmap normalization within the specified neighborhood size
        :type layer_def: Element, xml element containing the config for this layer
        :type input: tensor.tensor4
        :type rs: a random number generator (not used by this layer)
"""
normSizeAlphaBeta = [ int(layer_def.find("normsize").text),
float(layer_def.find("normalpha").text),
float(layer_def.find("normbeta").text)]
self.init(input, input_shape, normSizeAlphaBeta)
"""Pool Layer of a convolutional network """
def init(self, input, input_shape, normSizeAlphaBeta):
"""
Allocate a LeNetConvPoolLayer with shared variable internal parameters.
type normSizeAlphaBeta: tuple or list of size 3
:param normSizeAlphaBeta: (size,alpha,beta)
"""
self.input = gpu_contiguous(input)
#NORM
norm_op = CrossMapNorm( size_f=normSizeAlphaBeta[0], add_scale=normSizeAlphaBeta[1], pow_scale=normSizeAlphaBeta[2], blocked=True)
self.output = norm_op(self.input)[0]
self.input_shape = input_shape
self.output_shape = input_shape
self.params = []
| mit |
ccubed/Earl | unit_tests.py | 1 | 1812 | # -*- coding: utf-8; -*-
import unittest
import earl
class TestEarlPacking(unittest.TestCase):
def test_smallint(self):
self.assertEqual(earl.pack(10), bytes([131,97,10]))
def test_bigint(self):
self.assertEqual(earl.pack(1200), bytes([131,98,0,0,4,176]))
def test_floats(self):
self.assertEqual(earl.pack(3.141592), bytes([131,70,64,9,33,250,252,139,0,122]))
def test_map(self):
self.assertEqual(earl.pack({"d":10}), bytes([131,116,0,0,0,1,109,0,0,0,1,100,97,10]))
def test_list(self):
self.assertEqual(earl.pack([1,2,3]), bytes([131,108,0,0,0,3,97,1,97,2,97,3,106]))
def test_nil(self):
self.assertEqual(earl.pack([]), bytes([131,106]))
class TestEarlUnpacking(unittest.TestCase):
def test_smallint(self):
self.assertEqual(earl.unpack(bytes([131,97,234])), 234)
def test_bigint(self):
self.assertEqual(earl.unpack(bytes([131,98,0,0,214,216])), 55000)
def test_floats(self):
self.assertEqual(earl.unpack(bytes([131,70,64,108,42,225,71,174,20,123])), 225.34)
def test_stringext(self):
self.assertEqual(earl.unpack(bytes([131,107,0,3,1,2,3])), bytes([1,2,3]))
def test_list(self):
self.assertEqual(earl.unpack(bytes([131,108,0,0,0,3,97,1,97,2,97,3,106])), [1,2,3])
def test_map(self):
self.assertEqual(earl.unpack(bytes([131,116,0,0,0,1,100,0,1,97,97,150])), {'a': 150})
def test_nil(self):
self.assertEqual(earl.unpack(bytes([131,106])), [])
def test_atom(self):
self.assertEqual(earl.unpack(bytes([131,100,0,5,104,101,108,108,111])), "hello")
def test_utf8(self):
self.assertEqual(earl.unpack(bytes([131,107,0,6,233,153,176,233,153,189]), encoding="utf8"), "陰陽")
if __name__ == "__main__":
unittest.main()
| mit |
pombredanne/django-inplaceedit | testing/urls.py | 3 | 1057 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()
js_info_dict = {
'packages': ('django.conf',),
}
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'testing.views.home', name='home'),
# url(r'^testing/', include('testing.foo.urls')),
url(r'^$', include('multimediaresources.urls')),
url(r'^inplaceeditform/', include('inplaceeditform.urls')),
url(r'^jsi18n/$', 'django.views.i18n.javascript_catalog', js_info_dict),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
url(r'^admin/', include(admin.site.urls)),
url(r'^fk/', include('test_fk.urls')),
)
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$',
'django.views.static.serve',
{'document_root': settings.MEDIA_ROOT}),
)
| lgpl-3.0 |
suncycheng/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/relatedapp/tests.py | 123 | 13948 | from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry, Point, MultiPoint
from django.contrib.gis.db.models import Collect, Count, Extent, F, Union
from django.contrib.gis.geometry.backend import Geometry
from django.contrib.gis.tests.utils import mysql, oracle, no_mysql, no_oracle, no_spatialite
from models import City, Location, DirectoryEntry, Parcel, Book, Author, Article
class RelatedGeoModelTest(TestCase):
def test02_select_related(self):
"Testing `select_related` on geographic models (see #7126)."
qs1 = City.objects.all()
qs2 = City.objects.select_related()
qs3 = City.objects.select_related('location')
# Reference data for what's in the fixtures.
cities = (
('Aurora', 'TX', -97.516111, 33.058333),
('Roswell', 'NM', -104.528056, 33.387222),
('Kecksburg', 'PA', -79.460734, 40.18476),
)
for qs in (qs1, qs2, qs3):
for ref, c in zip(cities, qs):
nm, st, lon, lat = ref
self.assertEqual(nm, c.name)
self.assertEqual(st, c.state)
self.assertEqual(Point(lon, lat), c.location.point)
@no_mysql
def test03_transform_related(self):
"Testing the `transform` GeoQuerySet method on related geographic models."
# All the transformations are to state plane coordinate systems using
# US Survey Feet (thus a tolerance of 0 implies error w/in 1 survey foot).
tol = 0
def check_pnt(ref, pnt):
self.assertAlmostEqual(ref.x, pnt.x, tol)
self.assertAlmostEqual(ref.y, pnt.y, tol)
self.assertEqual(ref.srid, pnt.srid)
# Each city transformed to the SRID of their state plane coordinate system.
transformed = (('Kecksburg', 2272, 'POINT(1490553.98959621 314792.131023984)'),
('Roswell', 2257, 'POINT(481902.189077221 868477.766629735)'),
('Aurora', 2276, 'POINT(2269923.2484839 7069381.28722222)'),
)
for name, srid, wkt in transformed:
# Doing this implicitly sets `select_related` select the location.
# TODO: Fix why this breaks on Oracle.
qs = list(City.objects.filter(name=name).transform(srid, field_name='location__point'))
check_pnt(GEOSGeometry(wkt, srid), qs[0].location.point)
@no_mysql
@no_spatialite
def test04a_related_extent_aggregate(self):
"Testing the `extent` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Extent('location__point'))
# One for all locations, one that excludes New Mexico (Roswell).
all_extent = (-104.528056, 29.763374, -79.460734, 40.18476)
txpa_extent = (-97.516111, 29.763374, -79.460734, 40.18476)
e1 = City.objects.extent(field_name='location__point')
e2 = City.objects.exclude(state='NM').extent(field_name='location__point')
e3 = aggs['location__point__extent']
# The tolerance value is to four decimal places because of differences
# between the Oracle and PostGIS spatial backends on the extent calculation.
tol = 4
for ref, e in [(all_extent, e1), (txpa_extent, e2), (all_extent, e3)]:
for ref_val, e_val in zip(ref, e): self.assertAlmostEqual(ref_val, e_val, tol)
@no_mysql
def test04b_related_union_aggregate(self):
"Testing the `unionagg` GeoQuerySet aggregates on related geographic models."
# This combines the Extent and Union aggregates into one query
aggs = City.objects.aggregate(Union('location__point'))
# These are the points that are components of the aggregate geographic
# union that is returned. Each point # corresponds to City PK.
p1 = Point(-104.528056, 33.387222)
p2 = Point(-97.516111, 33.058333)
p3 = Point(-79.460734, 40.18476)
p4 = Point(-96.801611, 32.782057)
p5 = Point(-95.363151, 29.763374)
# Creating the reference union geometry depending on the spatial backend,
# as Oracle will have a different internal ordering of the component
# geometries than PostGIS. The second union aggregate is for a union
# query that includes limiting information in the WHERE clause (in other
# words a `.filter()` precedes the call to `.unionagg()`).
if oracle:
ref_u1 = MultiPoint(p4, p5, p3, p1, p2, srid=4326)
ref_u2 = MultiPoint(p3, p2, srid=4326)
else:
            # Looks like PostGIS orders points by longitude value.
ref_u1 = MultiPoint(p1, p2, p4, p5, p3, srid=4326)
ref_u2 = MultiPoint(p2, p3, srid=4326)
u1 = City.objects.unionagg(field_name='location__point')
u2 = City.objects.exclude(name__in=('Roswell', 'Houston', 'Dallas', 'Fort Worth')).unionagg(field_name='location__point')
u3 = aggs['location__point__union']
self.assertEqual(ref_u1, u1)
self.assertEqual(ref_u2, u2)
self.assertEqual(ref_u1, u3)
def test05_select_related_fk_to_subclass(self):
"Testing that calling select_related on a query over a model with an FK to a model subclass works"
# Regression test for #9752.
l = list(DirectoryEntry.objects.all().select_related())
def test06_f_expressions(self):
"Testing F() expressions on GeometryFields."
# Constructing a dummy parcel border and getting the City instance for
# assigning the FK.
b1 = GEOSGeometry('POLYGON((-97.501205 33.052520,-97.501205 33.052576,-97.501150 33.052576,-97.501150 33.052520,-97.501205 33.052520))', srid=4326)
pcity = City.objects.get(name='Aurora')
# First parcel has incorrect center point that is equal to the City;
# it also has a second border that is different from the first as a
# 100ft buffer around the City.
c1 = pcity.location.point
c2 = c1.transform(2276, clone=True)
b2 = c2.buffer(100)
p1 = Parcel.objects.create(name='P1', city=pcity, center1=c1, center2=c2, border1=b1, border2=b2)
# Now creating a second Parcel where the borders are the same, just
        # in different coordinate systems. The center points are also the
        # same (but in different coordinate systems), and this time they
# actually correspond to the centroid of the border.
c1 = b1.centroid
c2 = c1.transform(2276, clone=True)
p2 = Parcel.objects.create(name='P2', city=pcity, center1=c1, center2=c2, border1=b1, border2=b1)
# Should return the second Parcel, which has the center within the
# border.
qs = Parcel.objects.filter(center1__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
if not mysql:
# This time center2 is in a different coordinate system and needs
# to be wrapped in transformation SQL.
qs = Parcel.objects.filter(center2__within=F('border1'))
self.assertEqual(1, len(qs))
self.assertEqual('P2', qs[0].name)
# Should return the first Parcel, which has the center point equal
# to the point in the City ForeignKey.
qs = Parcel.objects.filter(center1=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
if not mysql:
# This time the city column should be wrapped in transformation SQL.
qs = Parcel.objects.filter(border2__contains=F('city__location__point'))
self.assertEqual(1, len(qs))
self.assertEqual('P1', qs[0].name)
def test07_values(self):
"Testing values() and values_list() and GeoQuerySets."
# GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
gqs = Location.objects.all()
gvqs = Location.objects.values()
gvlqs = Location.objects.values_list()
# Incrementing through each of the models, dictionaries, and tuples
# returned by the different types of GeoQuerySets.
for m, d, t in zip(gqs, gvqs, gvlqs):
# The values should be Geometry objects and not raw strings returned
# by the spatial database.
self.failUnless(isinstance(d['point'], Geometry))
self.failUnless(isinstance(t[1], Geometry))
self.assertEqual(m.point, d['point'])
self.assertEqual(m.point, t[1])
def test08_defer_only(self):
"Testing defer() and only() on Geographic models."
qs = Location.objects.all()
def_qs = Location.objects.defer('point')
for loc, def_loc in zip(qs, def_qs):
self.assertEqual(loc.point, def_loc.point)
def test09_pk_relations(self):
"Ensuring correct primary key column is selected across relations. See #10757."
# The expected ID values -- notice the last two location IDs
# are out of order. Dallas and Houston have location IDs that differ
# from their PKs -- this is done to ensure that the related location
# ID column is selected instead of ID column for the city.
city_ids = (1, 2, 3, 4, 5)
loc_ids = (1, 2, 3, 5, 4)
ids_qs = City.objects.order_by('id').values('id', 'location__id')
for val_dict, c_id, l_id in zip(ids_qs, city_ids, loc_ids):
self.assertEqual(val_dict['id'], c_id)
self.assertEqual(val_dict['location__id'], l_id)
def test10_combine(self):
"Testing the combination of two GeoQuerySets. See #10807."
buf1 = City.objects.get(name='Aurora').location.point.buffer(0.1)
buf2 = City.objects.get(name='Kecksburg').location.point.buffer(0.1)
qs1 = City.objects.filter(location__point__within=buf1)
qs2 = City.objects.filter(location__point__within=buf2)
combined = qs1 | qs2
names = [c.name for c in combined]
self.assertEqual(2, len(names))
self.failUnless('Aurora' in names)
self.failUnless('Kecksburg' in names)
def test11_geoquery_pickle(self):
"Ensuring GeoQuery objects are unpickled correctly. See #10839."
import pickle
from django.contrib.gis.db.models.sql import GeoQuery
qs = City.objects.all()
q_str = pickle.dumps(qs.query)
q = pickle.loads(q_str)
self.assertEqual(GeoQuery, q.__class__)
# TODO: fix on Oracle -- get the following error because the SQL is ordered
# by a geometry object, which Oracle apparently doesn't like:
# ORA-22901: cannot compare nested table or VARRAY or LOB attributes of an object type
@no_oracle
def test12a_count(self):
"Testing `Count` aggregate use with the `GeoManager` on geo-fields."
# The City, 'Fort Worth' uses the same location as Dallas.
dallas = City.objects.get(name='Dallas')
# Count annotation should be 2 for the Dallas location now.
loc = Location.objects.annotate(num_cities=Count('city')).get(id=dallas.location.id)
self.assertEqual(2, loc.num_cities)
def test12b_count(self):
"Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
# Should only be one author (Trevor Paglen) returned by this query, and
# the annotation should have 3 for the number of books, see #11087.
# Also testing with a `GeoValuesQuerySet`, see #11489.
qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
self.assertEqual(1, len(qs))
self.assertEqual(3, qs[0].num_books)
self.assertEqual(1, len(vqs))
self.assertEqual(3, vqs[0]['num_books'])
# TODO: The phantom model does appear on Oracle.
@no_oracle
def test13_select_related_null_fk(self):
"Testing `select_related` on a nullable ForeignKey via `GeoManager`. See #11381."
no_author = Book.objects.create(title='Without Author')
b = Book.objects.select_related('author').get(title='Without Author')
# Should be `None`, and not a 'dummy' model.
self.assertEqual(None, b.author)
@no_mysql
@no_oracle
@no_spatialite
def test14_collect(self):
"Testing the `collect` GeoQuerySet method and `Collect` aggregate."
# Reference query:
# SELECT AsText(ST_Collect("relatedapp_location"."point")) FROM "relatedapp_city" LEFT OUTER JOIN
# "relatedapp_location" ON ("relatedapp_city"."location_id" = "relatedapp_location"."id")
# WHERE "relatedapp_city"."state" = 'TX';
ref_geom = GEOSGeometry('MULTIPOINT(-97.516111 33.058333,-96.801611 32.782057,-95.363151 29.763374,-96.801611 32.782057)')
c1 = City.objects.filter(state='TX').collect(field_name='location__point')
c2 = City.objects.filter(state='TX').aggregate(Collect('location__point'))['location__point__collect']
for coll in (c1, c2):
# Even though Dallas and Ft. Worth share same point, Collect doesn't
# consolidate -- that's why 4 points in MultiPoint.
self.assertEqual(4, len(coll))
self.assertEqual(ref_geom, coll)
def test15_invalid_select_related(self):
"Testing doing select_related on the related name manager of a unique FK. See #13934."
qs = Article.objects.select_related('author__article')
# This triggers TypeError when `get_default_columns` has no `local_only`
# keyword. The TypeError is swallowed if QuerySet is actually
# evaluated as list generation swallows TypeError in CPython.
sql = str(qs.query)
# TODO: Related tests for KML, GML, and distance lookups.
| apache-2.0 |
voxmedia/thumbor | thumbor/filters/blur.py | 3 | 1703 | # -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com [email protected]
import math
from thumbor.filters import BaseFilter, filter_method
from thumbor.ext.filters import _convolution
MAX_RADIUS = 150
try:
xrange # Python 2
except NameError:
xrange = range # Python 3
class Filter(BaseFilter):
"""
Usage: /filters:blur(<radius> [, <sigma>])
Examples of use:
/filters:blur(1)/
/filters:blur(4)/
/filters:blur(4, 2)/
"""
def generate_1d_matrix(self, sigma, radius):
matrix_size = (radius * 2) + 1
matrix = []
two_sigma_squared = float(2 * sigma * sigma)
for x in xrange(matrix_size):
adj_x = x - radius
exp = math.e ** -(((adj_x * adj_x)) / two_sigma_squared)
matrix.append(exp / math.sqrt(two_sigma_squared * math.pi))
return tuple(matrix), matrix_size
@filter_method(BaseFilter.PositiveNonZeroNumber, BaseFilter.DecimalNumber)
def blur(self, radius, sigma=0):
if sigma == 0:
sigma = radius
if radius > MAX_RADIUS:
radius = MAX_RADIUS
matrix, matrix_size = self.generate_1d_matrix(sigma, radius)
mode, data = self.engine.image_data_as_rgb()
imgdata = _convolution.apply(mode, data, self.engine.size[0], self.engine.size[1], matrix, matrix_size, True)
imgdata = _convolution.apply(mode, imgdata, self.engine.size[0], self.engine.size[1], matrix, 1, True)
self.engine.set_image_data(imgdata)
| mit |
Versatilus/dragonfly | dragonfly/actions/keyboard/_pynput.py | 2 | 8733 | #
# This file is part of Dragonfly.
# (c) Copyright 2007, 2008 by Christo Butcher
# Licensed under the LGPL.
#
# Dragonfly is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Dragonfly is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Dragonfly. If not, see
# <http://www.gnu.org/licenses/>.
#
"""
This file implements a keyboard interface using the *pynput* Python
package. This implementation is used for Linux (X11) and Mac OS (Darwin).
"""
import logging
import sys
import time
from pynput.keyboard import Controller, KeyCode, Key
from ._base import BaseKeyboard, Typeable as BaseTypeable
class Typeable(BaseTypeable):
""" Typeable class for pynput. """
_log = logging.getLogger("keyboard")
def __init__(self, code, modifiers=(), name=None, is_text=False):
BaseTypeable.__init__(self, code, modifiers, name, is_text)
class SafeKeyCode(object):
"""
Class to safely get key codes from pynput.
"""
def __getattr__(self, name):
# Get the key code from pynput, returning KeyCode(vk=-1, char=name)
# if the key name isn't present.
# Keys are undefined on some platforms, e.g. "pause" on Darwin.
return getattr(Key, name, KeyCode(vk=-1, char=name))
virtual_keys = SafeKeyCode()
class BaseKeySymbols(object):
""" Base key symbols for pynput. """
# Whitespace and editing keys
RETURN = virtual_keys.enter
TAB = virtual_keys.tab
SPACE = virtual_keys.space
BACK = virtual_keys.backspace
DELETE = virtual_keys.delete
# Main modifier keys
SHIFT = virtual_keys.shift
CONTROL = virtual_keys.ctrl
ALT = virtual_keys.alt
# Right modifier keys
RSHIFT = virtual_keys.shift_r
RCONTROL = virtual_keys.ctrl_r
RALT = virtual_keys.alt_r
# Special keys
ESCAPE = virtual_keys.esc
INSERT = virtual_keys.insert
PAUSE = virtual_keys.pause
LSUPER = virtual_keys.cmd_l
RSUPER = virtual_keys.cmd_r
APPS = virtual_keys.menu
SNAPSHOT = virtual_keys.print_screen
# Lock keys
SCROLL_LOCK = virtual_keys.scroll_lock
NUM_LOCK = virtual_keys.num_lock
CAPS_LOCK = virtual_keys.caps_lock
# Navigation keys
UP = virtual_keys.up
DOWN = virtual_keys.down
LEFT = virtual_keys.left
RIGHT = virtual_keys.right
PAGE_UP = virtual_keys.page_up
PAGE_DOWN = virtual_keys.page_down
HOME = virtual_keys.home
END = virtual_keys.end
# Number pad keys
# pynput currently only exposes these for Windows, so we'll map them to
# equivalent characters and numbers instead.
MULTIPLY = KeyCode(char="*")
ADD = KeyCode(char="+")
SEPARATOR = KeyCode(char=".") # this is locale-dependent.
SUBTRACT = KeyCode(char="-")
DECIMAL = KeyCode(char=".")
DIVIDE = KeyCode(char="/")
NUMPAD0 = KeyCode(char="0")
NUMPAD1 = KeyCode(char="1")
NUMPAD2 = KeyCode(char="2")
NUMPAD3 = KeyCode(char="3")
NUMPAD4 = KeyCode(char="4")
NUMPAD5 = KeyCode(char="5")
NUMPAD6 = KeyCode(char="6")
NUMPAD7 = KeyCode(char="7")
NUMPAD8 = KeyCode(char="8")
NUMPAD9 = KeyCode(char="9")
# Function keys
# F13-20 don't work on X11 with pynput because they are not usually
# part of the keyboard map.
F1 = virtual_keys.f1
F2 = virtual_keys.f2
F3 = virtual_keys.f3
F4 = virtual_keys.f4
F5 = virtual_keys.f5
F6 = virtual_keys.f6
F7 = virtual_keys.f7
F8 = virtual_keys.f8
F9 = virtual_keys.f9
F10 = virtual_keys.f10
F11 = virtual_keys.f11
F12 = virtual_keys.f12
F13 = virtual_keys.f13
F14 = virtual_keys.f14
F15 = virtual_keys.f15
F16 = virtual_keys.f16
F17 = virtual_keys.f17
F18 = virtual_keys.f18
F19 = virtual_keys.f19
F20 = virtual_keys.f20
class X11KeySymbols(BaseKeySymbols):
"""
Symbols for X11 from pynput.
This class includes extra symbols matching those that dragonfly's Win32
keyboard interface provides.
"""
# Number pad keys
# Retrieved from /usr/include/X11/keysymdef.h on Debian 9.
MULTIPLY = KeyCode.from_vk(0xffaa)
ADD = KeyCode.from_vk(0xffab)
SEPARATOR = KeyCode.from_vk(0xffac)
SUBTRACT = KeyCode.from_vk(0xffad)
DECIMAL = KeyCode.from_vk(0xffae)
DIVIDE = KeyCode.from_vk(0xffaf)
NUMPAD0 = KeyCode.from_vk(0xffb0)
NUMPAD1 = KeyCode.from_vk(0xffb1)
NUMPAD2 = KeyCode.from_vk(0xffb2)
NUMPAD3 = KeyCode.from_vk(0xffb3)
NUMPAD4 = KeyCode.from_vk(0xffb4)
NUMPAD5 = KeyCode.from_vk(0xffb5)
NUMPAD6 = KeyCode.from_vk(0xffb6)
NUMPAD7 = KeyCode.from_vk(0xffb7)
NUMPAD8 = KeyCode.from_vk(0xffb8)
NUMPAD9 = KeyCode.from_vk(0xffb9)
# Function keys F21-F24.
# Retrieved from /usr/include/X11/keysymdef.h on Debian 9.
# These keys don't work on X11 with pynput because they are not usually
# part of the keyboard map. They are set here to avoid some warnings
# and because the Windows keyboard supports them.
F21 = KeyCode.from_vk(0xffd1)
F22 = KeyCode.from_vk(0xffd2)
F23 = KeyCode.from_vk(0xffd3)
F24 = KeyCode.from_vk(0xffd4)
# Multimedia keys
# Retrieved from /usr/include/X11/XF86keysym.h on Debian 9.
    # These should work on Debian-based distributions like Ubuntu, but
# might not work using different X11 server implementations because the
# symbols are vendor-specific.
# Any errors raised when typing these or any other keys will be caught
# and logged.
VOLUME_UP = KeyCode.from_vk(0x1008FF13)
VOLUME_DOWN = KeyCode.from_vk(0x1008FF11)
VOLUME_MUTE = KeyCode.from_vk(0x1008FF12)
MEDIA_NEXT_TRACK = KeyCode.from_vk(0x1008FF17)
MEDIA_PREV_TRACK = KeyCode.from_vk(0x1008FF16)
MEDIA_PLAY_PAUSE = KeyCode.from_vk(0x1008FF14)
BROWSER_BACK = KeyCode.from_vk(0x1008FF26)
BROWSER_FORWARD = KeyCode.from_vk(0x1008FF27)
class DarwinKeySymbols(BaseKeySymbols):
"""
Symbols for Darwin from pynput.
This class includes some extra symbols to prevent errors in
typeables.py.
All extras will be disabled (key code of -1).
"""
# Extra function keys.
F21 = virtual_keys.f21
F22 = virtual_keys.f22
F23 = virtual_keys.f23
F24 = virtual_keys.f24
# Multimedia keys.
VOLUME_UP = virtual_keys.volume_up
VOLUME_DOWN = virtual_keys.volume_down
VOLUME_MUTE = virtual_keys.volume_mute
MEDIA_NEXT_TRACK = virtual_keys.media_next_track
MEDIA_PREV_TRACK = virtual_keys.media_prev_track
MEDIA_PLAY_PAUSE = virtual_keys.media_play_pause
BROWSER_BACK = virtual_keys.browser_back
BROWSER_FORWARD = virtual_keys.browser_forward
class Keyboard(BaseKeyboard):
"""Static class wrapper around pynput.keyboard."""
_controller = Controller()
_log = logging.getLogger("keyboard")
@classmethod
def send_keyboard_events(cls, events):
"""
Send a sequence of keyboard events.
Positional arguments:
events -- a sequence of tuples of the form
(keycode, down, timeout), where
keycode (str|KeyCode): pynput key code.
down (boolean): True means the key will be pressed down,
False means the key will be released.
timeout (int): number of seconds to sleep after
the keyboard event.
"""
cls._log.debug("Keyboard.send_keyboard_events %r", events)
for event in events:
(key, down, timeout) = event
# Raise an error if the key is unsupported. 'key' can also be a
# string, e.g. "a", "b", "/", etc, but we don't check if those
# are valid.
if isinstance(key, KeyCode) and key.vk == -1:
raise ValueError("Unsupported key: %r" % key.char)
# Press/release the key, catching any errors.
try:
cls._controller.touch(key, down)
except Exception as e:
cls._log.exception("Failed to type key code %s: %s",
key, e)
# Sleep after the keyboard event if necessary.
if timeout:
time.sleep(timeout)
@classmethod
def get_typeable(cls, char, is_text=False):
return Typeable(char, is_text=is_text)
| lgpl-3.0 |
michaelni/audacity | lib-src/lv2/sord/waflib/Build.py | 58 | 21119 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,sys,errno,re,shutil
try:
import cPickle
except ImportError:
import pickle as cPickle
from waflib import Runner,TaskGen,Utils,ConfigSet,Task,Logs,Options,Context,Errors
import waflib.Node
CACHE_DIR='c4che'
CACHE_SUFFIX='_cache.py'
INSTALL=1337
UNINSTALL=-1337
SAVED_ATTRS='root node_deps raw_deps task_sigs'.split()
CFG_FILES='cfg_files'
POST_AT_ONCE=0
POST_LAZY=1
POST_BOTH=2
class BuildContext(Context.Context):
'''executes the build'''
cmd='build'
variant=''
def __init__(self,**kw):
super(BuildContext,self).__init__(**kw)
self.is_install=0
self.top_dir=kw.get('top_dir',Context.top_dir)
self.run_dir=kw.get('run_dir',Context.run_dir)
self.post_mode=POST_AT_ONCE
self.out_dir=kw.get('out_dir',Context.out_dir)
self.cache_dir=kw.get('cache_dir',None)
if not self.cache_dir:
self.cache_dir=self.out_dir+os.sep+CACHE_DIR
self.all_envs={}
self.task_sigs={}
self.node_deps={}
self.raw_deps={}
self.cache_dir_contents={}
self.task_gen_cache_names={}
self.launch_dir=Context.launch_dir
self.jobs=Options.options.jobs
self.targets=Options.options.targets
self.keep=Options.options.keep
self.cache_global=Options.cache_global
self.nocache=Options.options.nocache
self.progress_bar=Options.options.progress_bar
self.deps_man=Utils.defaultdict(list)
self.current_group=0
self.groups=[]
self.group_names={}
def get_variant_dir(self):
if not self.variant:
return self.out_dir
return os.path.join(self.out_dir,self.variant)
variant_dir=property(get_variant_dir,None)
def __call__(self,*k,**kw):
kw['bld']=self
ret=TaskGen.task_gen(*k,**kw)
self.task_gen_cache_names={}
self.add_to_group(ret,group=kw.get('group',None))
return ret
def rule(self,*k,**kw):
def f(rule):
ret=self(*k,**kw)
ret.rule=rule
return ret
return f
def __copy__(self):
raise Errors.WafError('build contexts are not supposed to be copied')
def install_files(self,*k,**kw):
pass
def install_as(self,*k,**kw):
pass
def symlink_as(self,*k,**kw):
pass
def load_envs(self):
node=self.root.find_node(self.cache_dir)
if not node:
raise Errors.WafError('The project was not configured: run "waf configure" first!')
lst=node.ant_glob('**/*%s'%CACHE_SUFFIX,quiet=True)
if not lst:
raise Errors.WafError('The cache directory is empty: reconfigure the project')
for x in lst:
name=x.path_from(node).replace(CACHE_SUFFIX,'').replace('\\','/')
env=ConfigSet.ConfigSet(x.abspath())
self.all_envs[name]=env
for f in env[CFG_FILES]:
newnode=self.root.find_resource(f)
try:
h=Utils.h_file(newnode.abspath())
except(IOError,AttributeError):
Logs.error('cannot find %r'%f)
h=Utils.SIG_NIL
newnode.sig=h
def init_dirs(self):
if not(os.path.isabs(self.top_dir)and os.path.isabs(self.out_dir)):
raise Errors.WafError('The project was not configured: run "waf configure" first!')
self.path=self.srcnode=self.root.find_dir(self.top_dir)
self.bldnode=self.root.make_node(self.variant_dir)
self.bldnode.mkdir()
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.execute_build()
def execute_build(self):
Logs.info("Waf: Entering directory `%s'"%self.variant_dir)
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
if self.progress_bar:
sys.stderr.write(Logs.colors.cursor_off)
try:
self.compile()
finally:
if self.progress_bar==1:
c=len(self.returned_tasks)or 1
self.to_log(self.progress_line(c,c,Logs.colors.BLUE,Logs.colors.NORMAL))
print('')
sys.stdout.flush()
sys.stderr.write(Logs.colors.cursor_on)
Logs.info("Waf: Leaving directory `%s'"%self.variant_dir)
self.post_build()
def restore(self):
try:
env=ConfigSet.ConfigSet(os.path.join(self.cache_dir,'build.config.py'))
except(IOError,OSError):
pass
else:
if env['version']<Context.HEXVERSION:
raise Errors.WafError('Version mismatch! reconfigure the project')
for t in env['tools']:
self.setup(**t)
f=None
try:
dbfn=os.path.join(self.variant_dir,Context.DBFILE)
try:
f=open(dbfn,'rb')
except(IOError,EOFError):
Logs.debug('build: Could not load the build cache %s (missing)'%dbfn)
else:
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
try:
data=cPickle.load(f)
except Exception ,e:
Logs.debug('build: Could not pickle the build cache %s: %r'%(dbfn,e))
else:
for x in SAVED_ATTRS:
setattr(self,x,data[x])
finally:
waflib.Node.pickle_lock.release()
finally:
if f:
f.close()
self.init_dirs()
def store(self):
data={}
for x in SAVED_ATTRS:
data[x]=getattr(self,x)
db=os.path.join(self.variant_dir,Context.DBFILE)
try:
waflib.Node.pickle_lock.acquire()
waflib.Node.Nod3=self.node_class
f=None
try:
f=open(db+'.tmp','wb')
cPickle.dump(data,f,-1)
finally:
if f:
f.close()
finally:
waflib.Node.pickle_lock.release()
try:
st=os.stat(db)
os.unlink(db)
if not Utils.is_win32:
os.chown(db+'.tmp',st.st_uid,st.st_gid)
except(AttributeError,OSError):
pass
os.rename(db+'.tmp',db)
def compile(self):
Logs.debug('build: compile()')
self.producer=Runner.Parallel(self,self.jobs)
self.producer.biter=self.get_build_iterator()
self.returned_tasks=[]
try:
self.producer.start()
except KeyboardInterrupt:
self.store()
raise
else:
if self.producer.dirty:
self.store()
if self.producer.error:
raise Errors.BuildError(self.producer.error)
def setup(self,tool,tooldir=None,funs=None):
if isinstance(tool,list):
for i in tool:self.setup(i,tooldir)
return
module=Context.load_tool(tool,tooldir)
if hasattr(module,"setup"):module.setup(self)
def get_env(self):
try:
return self.all_envs[self.variant]
except KeyError:
return self.all_envs['']
def set_env(self,val):
self.all_envs[self.variant]=val
env=property(get_env,set_env)
def add_manual_dependency(self,path,value):
if path is None:
raise ValueError('Invalid input')
if isinstance(path,waflib.Node.Node):
node=path
elif os.path.isabs(path):
node=self.root.find_resource(path)
else:
node=self.path.find_resource(path)
if isinstance(value,list):
self.deps_man[id(node)].extend(value)
else:
self.deps_man[id(node)].append(value)
def launch_node(self):
try:
return self.p_ln
except AttributeError:
self.p_ln=self.root.find_dir(self.launch_dir)
return self.p_ln
def hash_env_vars(self,env,vars_lst):
if not env.table:
env=env.parent
if not env:
return Utils.SIG_NIL
idx=str(id(env))+str(vars_lst)
try:
cache=self.cache_env
except AttributeError:
cache=self.cache_env={}
else:
try:
return self.cache_env[idx]
except KeyError:
pass
lst=[env[a]for a in vars_lst]
ret=Utils.h_list(lst)
Logs.debug('envhash: %s %r',Utils.to_hex(ret),lst)
cache[idx]=ret
return ret
def get_tgen_by_name(self,name):
cache=self.task_gen_cache_names
if not cache:
for g in self.groups:
for tg in g:
try:
cache[tg.name]=tg
except AttributeError:
pass
try:
return cache[name]
except KeyError:
raise Errors.WafError('Could not find a task generator for the name %r'%name)
def progress_line(self,state,total,col1,col2):
n=len(str(total))
Utils.rot_idx+=1
ind=Utils.rot_chr[Utils.rot_idx%4]
pc=(100.*state)/total
eta=str(self.timer)
fs="[%%%dd/%%%dd][%%s%%2d%%%%%%s][%s]["%(n,n,ind)
left=fs%(state,total,col1,pc,col2)
right='][%s%s%s]'%(col1,eta,col2)
cols=Logs.get_term_cols()-len(left)-len(right)+2*len(col1)+2*len(col2)
if cols<7:cols=7
ratio=((cols*state)//total)-1
bar=('='*ratio+'>').ljust(cols)
msg=Utils.indicator%(left,bar,right)
return msg
def declare_chain(self,*k,**kw):
return TaskGen.declare_chain(*k,**kw)
def pre_build(self):
for m in getattr(self,'pre_funs',[]):
m(self)
def post_build(self):
for m in getattr(self,'post_funs',[]):
m(self)
def add_pre_fun(self,meth):
try:
self.pre_funs.append(meth)
except AttributeError:
self.pre_funs=[meth]
def add_post_fun(self,meth):
try:
self.post_funs.append(meth)
except AttributeError:
self.post_funs=[meth]
def get_group(self,x):
if not self.groups:
self.add_group()
if x is None:
return self.groups[self.current_group]
if x in self.group_names:
return self.group_names[x]
return self.groups[x]
def add_to_group(self,tgen,group=None):
assert(isinstance(tgen,TaskGen.task_gen)or isinstance(tgen,Task.TaskBase))
tgen.bld=self
self.get_group(group).append(tgen)
def get_group_name(self,g):
if not isinstance(g,list):
g=self.groups[g]
for x in self.group_names:
if id(self.group_names[x])==id(g):
return x
return''
def get_group_idx(self,tg):
se=id(tg)
for i in range(len(self.groups)):
for t in self.groups[i]:
if id(t)==se:
return i
return None
def add_group(self,name=None,move=True):
if name and name in self.group_names:
Logs.error('add_group: name %s already present'%name)
g=[]
self.group_names[name]=g
self.groups.append(g)
if move:
self.current_group=len(self.groups)-1
def set_group(self,idx):
if isinstance(idx,str):
g=self.group_names[idx]
for i in range(len(self.groups)):
if id(g)==id(self.groups[i]):
self.current_group=i
else:
self.current_group=idx
def total(self):
total=0
for group in self.groups:
for tg in group:
try:
total+=len(tg.tasks)
except AttributeError:
total+=1
return total
def get_targets(self):
to_post=[]
min_grp=0
for name in self.targets.split(','):
tg=self.get_tgen_by_name(name)
if not tg:
raise Errors.WafError('target %r does not exist'%name)
m=self.get_group_idx(tg)
if m>min_grp:
min_grp=m
to_post=[tg]
elif m==min_grp:
to_post.append(tg)
return(min_grp,to_post)
def get_all_task_gen(self):
lst=[]
for g in self.groups:
lst.extend(g)
return lst
def post_group(self):
if self.targets=='*':
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
elif self.targets:
if self.cur<self._min_grp:
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
f()
else:
for tg in self._exact_tg:
tg.post()
else:
ln=self.launch_node()
if ln.is_child_of(self.bldnode):
Logs.warn('Building from the build directory, forcing --targets=*')
ln=self.srcnode
elif not ln.is_child_of(self.srcnode):
Logs.warn('CWD %s is not under %s, forcing --targets=* (run distclean?)'%(ln.abspath(),self.srcnode.abspath()))
ln=self.srcnode
for tg in self.groups[self.cur]:
try:
f=tg.post
except AttributeError:
pass
else:
if tg.path.is_child_of(ln):
f()
def get_tasks_group(self,idx):
tasks=[]
for tg in self.groups[idx]:
try:
tasks.extend(tg.tasks)
except AttributeError:
tasks.append(tg)
return tasks
def get_build_iterator(self):
self.cur=0
if self.targets and self.targets!='*':
(self._min_grp,self._exact_tg)=self.get_targets()
global lazy_post
if self.post_mode!=POST_LAZY:
while self.cur<len(self.groups):
self.post_group()
self.cur+=1
self.cur=0
while self.cur<len(self.groups):
if self.post_mode!=POST_AT_ONCE:
self.post_group()
tasks=self.get_tasks_group(self.cur)
Task.set_file_constraints(tasks)
Task.set_precedence_constraints(tasks)
self.cur_tasks=tasks
self.cur+=1
if not tasks:
continue
yield tasks
while 1:
yield[]
class inst(Task.Task):
color='CYAN'
def uid(self):
lst=[self.dest,self.path]+self.source
return Utils.h_list(repr(lst))
def post(self):
buf=[]
for x in self.source:
if isinstance(x,waflib.Node.Node):
y=x
else:
y=self.path.find_resource(x)
if not y:
if Logs.verbose:
Logs.warn('Could not find %s immediately (may cause broken builds)'%x)
idx=self.generator.bld.get_group_idx(self)
for tg in self.generator.bld.groups[idx]:
if not isinstance(tg,inst)and id(tg)!=id(self):
tg.post()
y=self.path.find_resource(x)
if y:
break
else:
raise Errors.WafError('Could not find %r in %r'%(x,self.path))
buf.append(y)
self.inputs=buf
def runnable_status(self):
ret=super(inst,self).runnable_status()
if ret==Task.SKIP_ME:
return Task.RUN_ME
return ret
def __str__(self):
return''
def run(self):
return self.generator.exec_task()
def get_install_path(self,destdir=True):
dest=Utils.subst_vars(self.dest,self.env)
dest=dest.replace('/',os.sep)
if destdir and Options.options.destdir:
dest=os.path.join(Options.options.destdir,os.path.splitdrive(dest)[1].lstrip(os.sep))
return dest
def exec_install_files(self):
destpath=self.get_install_path()
if not destpath:
raise Errors.WafError('unknown installation path %r'%self.generator)
for x,y in zip(self.source,self.inputs):
if self.relative_trick:
destfile=os.path.join(destpath,y.path_from(self.path))
Utils.check_dir(os.path.dirname(destfile))
else:
destfile=os.path.join(destpath,y.name)
self.generator.bld.do_install(y.abspath(),destfile,self.chmod)
def exec_install_as(self):
destfile=self.get_install_path()
self.generator.bld.do_install(self.inputs[0].abspath(),destfile,self.chmod)
def exec_symlink_as(self):
destfile=self.get_install_path()
src=self.link
if self.relative_trick:
src=os.path.relpath(src,os.path.dirname(destfile))
self.generator.bld.do_link(src,destfile)
class InstallContext(BuildContext):
'''installs the targets on the system'''
cmd='install'
def __init__(self,**kw):
super(InstallContext,self).__init__(**kw)
self.uninstall=[]
self.is_install=INSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
d,_=os.path.split(tgt)
if not d:
raise Errors.WafError('Invalid installation given %r->%r'%(src,tgt))
Utils.check_dir(d)
srclbl=src.replace(self.srcnode.abspath()+os.sep,'')
if not Options.options.force:
try:
st1=os.stat(tgt)
st2=os.stat(src)
except OSError:
pass
else:
if st1.st_mtime+2>=st2.st_mtime and st1.st_size==st2.st_size:
if not self.progress_bar:
Logs.info('- install %s (from %s)'%(tgt,srclbl))
return False
if not self.progress_bar:
Logs.info('+ install %s (from %s)'%(tgt,srclbl))
try:
os.remove(tgt)
except OSError:
pass
try:
shutil.copy2(src,tgt)
os.chmod(tgt,chmod)
except IOError:
try:
os.stat(src)
except(OSError,IOError):
Logs.error('File %r does not exist'%src)
raise Errors.WafError('Could not install the file %r'%tgt)
def do_link(self,src,tgt):
d,_=os.path.split(tgt)
Utils.check_dir(d)
link=False
if not os.path.islink(tgt):
link=True
elif os.readlink(tgt)!=src:
link=True
if link:
try:os.remove(tgt)
except OSError:pass
if not self.progress_bar:
Logs.info('+ symlink %s (to %s)'%(tgt,src))
os.symlink(src,tgt)
else:
if not self.progress_bar:
Logs.info('- symlink %s (to %s)'%(tgt,src))
def run_task_now(self,tsk,postpone):
tsk.post()
if not postpone:
if tsk.runnable_status()==Task.ASK_LATER:
raise self.WafError('cannot post the task %r'%tsk)
tsk.run()
def install_files(self,dest,files,env=None,chmod=Utils.O644,relative_trick=False,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
if isinstance(files,waflib.Node.Node):
tsk.source=[files]
else:
tsk.source=Utils.to_list(files)
tsk.dest=dest
tsk.exec_task=tsk.exec_install_files
tsk.relative_trick=relative_trick
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def install_as(self,dest,srcfile,env=None,chmod=Utils.O644,cwd=None,add=True,postpone=True):
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.path=cwd or self.path
tsk.chmod=chmod
tsk.source=[srcfile]
tsk.dest=dest
tsk.exec_task=tsk.exec_install_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
def symlink_as(self,dest,src,env=None,cwd=None,add=True,postpone=True,relative_trick=False):
if Utils.is_win32:
return
tsk=inst(env=env or self.env)
tsk.bld=self
tsk.dest=dest
tsk.path=cwd or self.path
tsk.source=[]
tsk.link=src
tsk.relative_trick=relative_trick
tsk.exec_task=tsk.exec_symlink_as
if add:self.add_to_group(tsk)
self.run_task_now(tsk,postpone)
return tsk
class UninstallContext(InstallContext):
'''removes the targets installed'''
cmd='uninstall'
def __init__(self,**kw):
super(UninstallContext,self).__init__(**kw)
self.is_install=UNINSTALL
def do_install(self,src,tgt,chmod=Utils.O644):
if not self.progress_bar:
Logs.info('- remove %s'%tgt)
self.uninstall.append(tgt)
try:
os.remove(tgt)
except OSError ,e:
if e.errno!=errno.ENOENT:
if not getattr(self,'uninstall_error',None):
self.uninstall_error=True
Logs.warn('build: some files could not be uninstalled (retry with -vv to list them)')
if Logs.verbose>1:
Logs.warn('Could not remove %s (error code %r)'%(e.filename,e.errno))
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def do_link(self,src,tgt):
try:
if not self.progress_bar:
Logs.info('- unlink %s'%tgt)
os.remove(tgt)
except OSError:
pass
while tgt:
tgt=os.path.dirname(tgt)
try:
os.rmdir(tgt)
except OSError:
break
def execute(self):
try:
def runnable_status(self):
return Task.SKIP_ME
setattr(Task.Task,'runnable_status_back',Task.Task.runnable_status)
setattr(Task.Task,'runnable_status',runnable_status)
super(UninstallContext,self).execute()
finally:
setattr(Task.Task,'runnable_status',Task.Task.runnable_status_back)
class CleanContext(BuildContext):
'''cleans the project'''
cmd='clean'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
try:
self.clean()
finally:
self.store()
def clean(self):
Logs.debug('build: clean called')
if self.bldnode!=self.srcnode:
lst=[]
for e in self.all_envs.values():
lst.extend(self.root.find_or_declare(f)for f in e[CFG_FILES])
for n in self.bldnode.ant_glob('**/*',excl='.lock* *conf_check_*/** config.log c4che/*',quiet=True):
if n in lst:
continue
n.delete()
self.root.children={}
for v in'node_deps task_sigs raw_deps'.split():
setattr(self,v,{})
class ListContext(BuildContext):
'''lists the targets to execute'''
cmd='list'
def execute(self):
self.restore()
if not self.all_envs:
self.load_envs()
self.recurse([self.run_dir])
self.pre_build()
self.timer=Utils.Timer()
for g in self.groups:
for tg in g:
try:
f=tg.post
except AttributeError:
pass
else:
f()
try:
self.get_tgen_by_name('')
except Exception:
pass
lst=list(self.task_gen_cache_names.keys())
lst.sort()
for k in lst:
Logs.pprint('GREEN',k)
class StepContext(BuildContext):
'''executes tasks in a step-by-step fashion, for debugging'''
cmd='step'
def __init__(self,**kw):
super(StepContext,self).__init__(**kw)
self.files=Options.options.files
def compile(self):
if not self.files:
Logs.warn('Add a pattern for the debug build, for example "waf step --files=main.c,app"')
BuildContext.compile(self)
return
targets=None
if self.targets and self.targets!='*':
targets=self.targets.split(',')
for g in self.groups:
for tg in g:
if targets and tg.name not in targets:
continue
try:
f=tg.post
except AttributeError:
pass
else:
f()
for pat in self.files.split(','):
matcher=self.get_matcher(pat)
for tg in g:
if isinstance(tg,Task.TaskBase):
lst=[tg]
else:
lst=tg.tasks
for tsk in lst:
do_exec=False
for node in getattr(tsk,'inputs',[]):
if matcher(node,output=False):
do_exec=True
break
for node in getattr(tsk,'outputs',[]):
if matcher(node,output=True):
do_exec=True
break
if do_exec:
ret=tsk.run()
Logs.info('%s -> exit %r'%(str(tsk),ret))
def get_matcher(self,pat):
inn=True
out=True
if pat.startswith('in:'):
out=False
pat=pat.replace('in:','')
elif pat.startswith('out:'):
inn=False
pat=pat.replace('out:','')
anode=self.root.find_node(pat)
pattern=None
if not anode:
if not pat.startswith('^'):
pat='^.+?%s'%pat
if not pat.endswith('$'):
pat='%s$'%pat
pattern=re.compile(pat)
def match(node,output):
if output==True and not out:
return False
if output==False and not inn:
return False
if anode:
return anode==node
else:
return pattern.match(node.abspath())
return match
BuildContext.store=Utils.nogc(BuildContext.store)
BuildContext.restore=Utils.nogc(BuildContext.restore)
| gpl-2.0 |
reneklacan/kivybomber | map.py | 1 | 12240 |
import random
from threading import Lock, Timer
from kivy.logger import Logger
from kivy.uix.gridlayout import GridLayout
from kivy.uix.relativelayout import RelativeLayout
from ai import AIMap
from components import game_objects
from constants import *
from effects import *
flame_lock = Lock()
class GameGrid(GridLayout):
def __init__(self, **kwargs):
GridLayout.__init__(self, cols=kwargs['grid_size'][0], **kwargs)
self.layout = kwargs['layout']
self.grid_size = kwargs['grid_size']
self.effects_propability = kwargs['effects_propability']
self.tiles = []
self.changes = []
self.frozen = False
self.tile_resized = 0
self.tiles_amount = self.grid_size[0] * self.grid_size[1]
for i, tile_type in enumerate(self.layout):
ix = i % self.grid_size[0]
iy = i // self.grid_size[0]
tile = GameTile(type=tile_type, ix=ix, iy=iy)
self.add_widget(tile)
self.tiles.append(tile)
self.aimap = AIMap(self)
def restart(self):
self.aimap.reset()
self.frozen = False
for i, tile_type in enumerate(self.layout):
self.tiles[i].type = tile_type
self.tiles[i].restart()
def on_tile_size(self):
self.tile_resized += 1
if self.tile_resized == self.tiles_amount:
self.on_load()
def on_load(self):
pass
def freeze(self):
self.frozen = True
def add_change(self, tile):
self.changes.append(tile)
def get_tile(self, ix, iy):
index = (self.grid_size[0]*iy + ix)
if index >= len(self.tiles) or index < 0:
Logger.debug(
'GameGrid: get_tile(%d,%d) index is %d (min: 0, max: %d)'
% (ix, iy, index, len(self.tiles) - 1)
)
return None
return self.tiles[index]
def get_tile_indexes(self, x, y):
width, height = self.parent.tile_size
return (int(x/width), int(self.grid_size[1] - (y/height)))
def find_path(self, from_tuple, to_tuple):
#print 'from:', from_tuple
#print 'to: ', to_tuple
start = self.get_tile(*from_tuple)
destination = self.get_tile(*to_tuple)
if destination is None or destination.is_obstacle():
return []
open_set = [start]
path_map = [[None for x in range(self.grid_size[0])] for y in range(self.grid_size[1])]
path_map[start.iy][start.ix] = 0
end = False
while open_set and not end:
tile = open_set.pop()
steps = path_map[tile.iy][tile.ix]
for neighbor in tile.tiles_around():
if neighbor is None:
continue
if not neighbor.is_obstacle():
current_steps = path_map[neighbor.iy][neighbor.ix]
if current_steps is None or current_steps > steps + 1:
path_map[neighbor.iy][neighbor.ix] = steps + 1
if neighbor == destination:
end = True
break
open_set.insert(0, neighbor)
if not end:
return []
path = [destination]
tile = destination
if tile is None:
return []
steps = path_map[tile.iy][tile.ix]
while steps and steps != 1:
for item in tile.tiles_around():
if path_map[item.iy][item.ix] == steps - 1:
path.append(item)
tile = item
steps = steps - 1
break
return path
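# Example (illustrative comment, not original code): a caller such as the AI
# can ask for a route between two tile indexes, e.g.
#
#   path = grid.find_path((1, 1), (5, 3))
#
# The returned list starts at the destination tile and walks back towards the
# start (the start tile itself is not included); it is empty when the
# destination is an obstacle or unreachable.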
class Bomb:
def __init__(self, tile, player):
self.tile = tile
self.tile.canvas.add(game_objects.bomb)
self.player = player
self.timer = Timer(self.player.bomb_timeout, self.explode)
self.timer.start()
def explode(self):
self.timer.cancel()
self.tile.bomb = None
Flame(self.tile, self.player)
def stop(self):
self.timer.cancel()
class Flame:
def __init__(self, epicentrum, player):
self.player = player
self.tiles = [epicentrum]
epicentrum.destroy()
epicentrum.canvas.add(game_objects.flame_center)
epicentrum.flame.append(FLAME_CENTER)
flame_type = {}
flame_type['top'] = FLAME_VERTICAL
flame_type['bottom'] = FLAME_VERTICAL
flame_type['left'] = FLAME_HOTIZONTAL
flame_type['right'] = FLAME_HOTIZONTAL
bombs = []
self.changes = []
for direction in ('top', 'bottom', 'left', 'right'):
tile = epicentrum
penetration = self.player.flame_penetration
end = False
for i in range(player.bomb_power):
tile = tile.next_tile(direction)
if not tile.is_destroyable():
break
if tile.is_block():
if penetration == 0:
break
penetration -= 1
self.changes.append(tile)
if tile.bomb is not None:
bombs.append(tile.bomb)
tile.destroy()
if not tile.flame:
if not penetration or i == player.bomb_power - 1:
# end of flame
tile.canvas.add(game_objects.flame_end[direction])
else:
# arm of flame
tile.canvas.add(game_objects.flame_arm[direction])
tile.flame = [flame_type[direction]]
else:
if tile.flame[0] == flame_type[direction]:
# flames have same direction
tile.flame.append(flame_type[direction])
tile.canvas.add(game_objects.flame_arm[direction])
else:
# flames are crossing
tile.flame.append(flame_type[direction])
tile.canvas.add(game_objects.flame_center)
self.tiles.append(tile)
if not penetration:
break
characters = []
characters += epicentrum.parent.parent.players
characters += epicentrum.parent.parent.monsters
for each in characters:
if each.get_tile() in self.tiles:
each.die()
for bomb in bombs:
bomb.explode()
self.timer = Timer(self.player.flame_timeout, self.expire)
self.timer.start()
def expire(self):
for tile in self.tiles:
flame_lock.acquire()
            if tile.flame == []:
                flame_lock.release()
                continue # probably after restart
tile.flame.pop(0)
tile.destroy()
            if tile.flame:
                if tile.flame[0] == FLAME_VERTICAL:
                    tile.canvas.add(game_objects.flame_v)
                elif tile.flame[0] == FLAME_HOTIZONTAL:
                    tile.canvas.add(game_objects.flame_h)
flame_lock.release()
self.player.grid.changes += self.changes
class GameTile(RelativeLayout):
def __init__(self, **kwargs):
self.ix = None
self.iy = None
RelativeLayout.__init__(self, **kwargs)
self.type = kwargs['type']
self.ix = kwargs['ix']
self.iy = kwargs['iy']
self.rectangle = None
self.bomb = None
self.effect = None
def __repr__(self):
if self.ix is None:
return 'GameTile'
return 'GameTile(ix: %d, iy: %d)' % (self.ix, self.iy)
def on_size(self, tile, size):
if not game_objects.resize(size):
return
if self.rectangle is not None:
self.rectangle.size = size
self.init()
self.parent.on_tile_size()
def init(self):
self.rectangle = None
self.bomb = None
self.flame = []
self.item = None
self.effect = None
self.canvas.clear()
if self.type == BLOCK:
self.canvas.add(game_objects.block)
elif self.type == MAZE:
self.canvas.add(game_objects.maze)
elif self.type == EMPTY:
pass
else:
self.type = SPACE
self.canvas.add(game_objects.space)
def restart(self):
if self.bomb is not None:
self.bomb.stop()
self.init()
def get_index_tuple(self):
return (self.ix, self.iy)
def is_crossable(self):
if self.type == SPACE and self.bomb is None:
return True
return False
def is_space(self):
return self.type == SPACE
def is_obstacle(self):
if self.type != SPACE:
return True
if self.bomb:
return True
return False
def is_destroyable(self):
if self.type == MAZE:
return False
return True
def is_block(self):
if self.type == BLOCK:
return True
return False
def destroy(self):
self.effect = None
self.canvas.clear()
self.canvas.add(game_objects.space)
if self.item is not None:
self.canvas.add(self.item.image)
self.effect = self.item
self.item = None
if self.is_block():
            random_num = random.random()
            probabilities = self.parent.effects_propability
            # sort effects by their probability
            effects = sorted(probabilities, key=probabilities.get)
            for effect in effects:
                probability = probabilities[effect]
                if probability <= random_num:
                    cls = eval(effect)
                    self.item = cls()
                    break
self.type = SPACE
def get_effect(self):
effect = self.effect
if effect is not None:
self.destroy()
return effect
def add_bomb(self, player):
# if game is restarting
if self.parent.frozen:
return False
if self.bomb:
return False
print 'bomb has been planted'
self.parent.aimap.plant_bomb(player, self)
self.bomb = Bomb(self, player)
return True
def next_tile(self, direction):
if direction == 'top':
return self.top_tile()
elif direction == 'bottom':
return self.bottom_tile()
elif direction == 'left':
return self.left_tile()
elif direction == 'right':
return self.right_tile()
else:
raise Exception('next_tile - direction %s' % direction)
def top_tile(self):
return self.parent.get_tile(self.ix, self.iy - 1)
def bottom_tile(self):
return self.parent.get_tile(self.ix, self.iy + 1)
def left_tile(self):
return self.parent.get_tile(self.ix - 1, self.iy)
def right_tile(self):
return self.parent.get_tile(self.ix + 1, self.iy)
def tiles_around(self, space=False, dictionary=False):
tiles = {
'top': self.top_tile(),
'bottom': self.bottom_tile(),
'left': self.left_tile(),
'right': self.right_tile()
}
if space:
for direction, tile in tiles.items():
if tile is None or not tile.is_crossable():
del tiles[direction]
if dictionary:
return tiles
return tiles.values()
def eight_tiles_around(self):
"""
X X X
X O X
X X X
"""
tiles = []
left_tile = self.left_tile()
right_tile = self.right_tile()
tiles.append(left_tile)
tiles.append(left_tile.top_tile())
tiles.append(left_tile.bottom_tile())
tiles.append(right_tile)
tiles.append(right_tile.top_tile())
tiles.append(right_tile.bottom_tile())
tiles.append(self.top_tile())
tiles.append(self.bottom_tile())
return tiles
| bsd-3-clause |
hopeall/odoo | addons/crm_helpdesk/crm_helpdesk.py | 182 | 7480 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp.addons.crm import crm
from openerp.osv import fields, osv
from openerp import tools
from openerp.tools.translate import _
from openerp.tools import html2plaintext
class crm_helpdesk(osv.osv):
""" Helpdesk Cases """
_name = "crm.helpdesk"
_description = "Helpdesk"
_order = "id desc"
_inherit = ['mail.thread']
_columns = {
'id': fields.integer('ID', readonly=True),
'name': fields.char('Name', required=True),
'active': fields.boolean('Active', required=False),
'date_action_last': fields.datetime('Last Action', readonly=1),
'date_action_next': fields.datetime('Next Action', readonly=1),
'description': fields.text('Description'),
'create_date': fields.datetime('Creation Date' , readonly=True),
'write_date': fields.datetime('Update Date' , readonly=True),
'date_deadline': fields.date('Deadline'),
'user_id': fields.many2one('res.users', 'Responsible'),
'section_id': fields.many2one('crm.case.section', 'Sales Team', \
select=True, help='Responsible sales team. Define Responsible user and Email account for mail gateway.'),
'company_id': fields.many2one('res.company', 'Company'),
'date_closed': fields.datetime('Closed', readonly=True),
'partner_id': fields.many2one('res.partner', 'Partner'),
'email_cc': fields.text('Watchers Emails', size=252 , help="These email addresses will be added to the CC field of all inbound and outbound emails for this record before being sent. Separate multiple email addresses with a comma"),
'email_from': fields.char('Email', size=128, help="Destination email for email gateway"),
'date': fields.datetime('Date'),
'ref': fields.reference('Reference', selection=openerp.addons.base.res.res_request.referencable_models),
'ref2': fields.reference('Reference 2', selection=openerp.addons.base.res.res_request.referencable_models),
'channel_id': fields.many2one('crm.tracking.medium', 'Channel', help="Communication channel."),
'planned_revenue': fields.float('Planned Revenue'),
'planned_cost': fields.float('Planned Costs'),
'priority': fields.selection([('0','Low'), ('1','Normal'), ('2','High')], 'Priority'),
'probability': fields.float('Probability (%)'),
'categ_id': fields.many2one('crm.case.categ', 'Category', \
domain="['|',('section_id','=',False),('section_id','=',section_id),\
('object_id.model', '=', 'crm.helpdesk')]"),
'duration': fields.float('Duration', states={'done': [('readonly', True)]}),
'state': fields.selection(
[('draft', 'New'),
('open', 'In Progress'),
('pending', 'Pending'),
('done', 'Closed'),
('cancel', 'Cancelled')], 'Status', readonly=True, track_visibility='onchange',
help='The status is set to \'Draft\', when a case is created.\
\nIf the case is in progress the status is set to \'Open\'.\
\nWhen the case is over, the status is set to \'Done\'.\
\nIf the case needs to be reviewed then the status is set to \'Pending\'.'),
}
_defaults = {
'active': lambda *a: 1,
'user_id': lambda s, cr, uid, c: uid,
'state': lambda *a: 'draft',
'date': fields.datetime.now,
'company_id': lambda s, cr, uid, c: s.pool.get('res.company')._company_default_get(cr, uid, 'crm.helpdesk', context=c),
'priority': '1',
}
def on_change_partner_id(self, cr, uid, ids, partner_id, context=None):
values = {}
if partner_id:
partner = self.pool.get('res.partner').browse(cr, uid, partner_id, context=context)
values = {
'email_from': partner.email,
}
return {'value': values}
def write(self, cr, uid, ids, values, context=None):
""" Override to add case management: open/close dates """
if values.get('state'):
if values.get('state') in ['draft', 'open'] and not values.get('date_open'):
values['date_open'] = fields.datetime.now()
elif values.get('state') == 'done' and not values.get('date_closed'):
values['date_closed'] = fields.datetime.now()
return super(crm_helpdesk, self).write(cr, uid, ids, values, context=context)
def case_escalate(self, cr, uid, ids, context=None):
""" Escalates case to parent level """
data = {'active': True}
for case in self.browse(cr, uid, ids, context=context):
if case.section_id and case.section_id.parent_id:
parent_id = case.section_id.parent_id
data['section_id'] = parent_id.id
if parent_id.change_responsible and parent_id.user_id:
data['user_id'] = parent_id.user_id.id
else:
raise osv.except_osv(_('Error!'), _('You can not escalate, you are already at the top level regarding your sales-team category.'))
self.write(cr, uid, [case.id], data, context=context)
return True
# -------------------------------------------------------
# Mail gateway
# -------------------------------------------------------
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
if custom_values is None:
custom_values = {}
desc = html2plaintext(msg.get('body')) if msg.get('body') else ''
defaults = {
'name': msg.get('subject') or _("No Subject"),
'description': desc,
'email_from': msg.get('from'),
'email_cc': msg.get('cc'),
'user_id': False,
'partner_id': msg.get('author_id', False),
}
defaults.update(custom_values)
return super(crm_helpdesk, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
joone/chromium-crosswalk | build/detect_host_arch.py | 111 | 1179 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Outputs host CPU architecture in format recognized by gyp."""
import platform
import re
import sys
def HostArch():
"""Returns the host architecture with a predictable string."""
host_arch = platform.machine()
# Convert machine type to format recognized by gyp.
if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
host_arch = 'ia32'
elif host_arch in ['x86_64', 'amd64']:
host_arch = 'x64'
elif host_arch.startswith('arm'):
host_arch = 'arm'
# platform.machine is based on running kernel. It's possible to use 64-bit
# kernel with 32-bit userland, e.g. to give linker slightly more memory.
# Distinguish between different userland bitness by querying
# the python binary.
if host_arch == 'x64' and platform.architecture()[0] == '32bit':
host_arch = 'ia32'
return host_arch
def DoMain(_):
"""Hook to be called from gyp without starting a separate python
interpreter."""
return HostArch()
if __name__ == '__main__':
print DoMain([])
| bsd-3-clause |
Ballz0fSteel/Umeko | lib/youtube_dl/downloader/common.py | 10 | 14495 | from __future__ import division, unicode_literals
import os
import re
import sys
import time
import random
from ..compat import compat_os_name
from ..utils import (
decodeArgument,
encodeFilename,
error_to_compat_str,
format_bytes,
shell_quote,
timeconvert,
)
class FileDownloader(object):
"""File Downloader class.
File downloader objects are the ones responsible of downloading the
actual video file and writing it to disk.
File downloaders accept a lot of parameters. In order not to saturate
the object constructor with arguments, it receives a dictionary of
options instead.
Available options:
verbose: Print additional info to stdout.
quiet: Do not print messages to stdout.
ratelimit: Download speed limit, in bytes/sec.
retries: Number of times to retry for HTTP error 5xx
buffersize: Size of download buffer in bytes.
noresizebuffer: Do not automatically resize the download buffer.
continuedl: Try to continue downloads if possible.
noprogress: Do not print the progress bar.
logtostderr: Log messages to stderr instead of stdout.
consoletitle: Display progress in console window's titlebar.
nopart: Do not use temporary .part files.
updatetime: Use the Last-modified header to set output file timestamps.
test: Download only first bytes to test the downloader.
min_filesize: Skip files smaller than this size
max_filesize: Skip files larger than this size
xattr_set_filesize: Set ytdl.filesize user xattribute with expected size.
(experimental)
external_downloader_args: A list of additional command-line arguments for the
external downloader.
hls_use_mpegts: Use the mpegts container for HLS videos.
Subclasses of this one must re-define the real_download method.
"""
_TEST_FILE_SIZE = 10241
params = None
def __init__(self, ydl, params):
"""Create a FileDownloader object with the given options."""
self.ydl = ydl
self._progress_hooks = []
self.params = params
self.add_progress_hook(self.report_progress)
@staticmethod
def format_seconds(seconds):
(mins, secs) = divmod(seconds, 60)
(hours, mins) = divmod(mins, 60)
if hours > 99:
return '--:--:--'
if hours == 0:
return '%02d:%02d' % (mins, secs)
else:
return '%02d:%02d:%02d' % (hours, mins, secs)
@staticmethod
def calc_percent(byte_counter, data_len):
if data_len is None:
return None
return float(byte_counter) / float(data_len) * 100.0
@staticmethod
def format_percent(percent):
if percent is None:
return '---.-%'
return '%6s' % ('%3.1f%%' % percent)
@staticmethod
def calc_eta(start, now, total, current):
if total is None:
return None
if now is None:
now = time.time()
dif = now - start
if current == 0 or dif < 0.001: # One millisecond
return None
rate = float(current) / dif
return int((float(total) - float(current)) / rate)
@staticmethod
def format_eta(eta):
if eta is None:
return '--:--'
return FileDownloader.format_seconds(eta)
@staticmethod
def calc_speed(start, now, bytes):
dif = now - start
if bytes == 0 or dif < 0.001: # One millisecond
return None
return float(bytes) / dif
@staticmethod
def format_speed(speed):
if speed is None:
return '%10s' % '---b/s'
return '%10s' % ('%s/s' % format_bytes(speed))
@staticmethod
def format_retries(retries):
return 'inf' if retries == float('inf') else '%.0f' % retries
@staticmethod
def best_block_size(elapsed_time, bytes):
new_min = max(bytes / 2.0, 1.0)
new_max = min(max(bytes * 2.0, 1.0), 4194304) # Do not surpass 4 MB
if elapsed_time < 0.001:
return int(new_max)
rate = bytes / elapsed_time
if rate > new_max:
return int(new_max)
if rate < new_min:
return int(new_min)
return int(rate)
@staticmethod
def parse_bytes(bytestr):
"""Parse a string indicating a byte quantity into an integer."""
matchobj = re.match(r'(?i)^(\d+(?:\.\d+)?)([kMGTPEZY]?)$', bytestr)
if matchobj is None:
return None
number = float(matchobj.group(1))
multiplier = 1024.0 ** 'bkmgtpezy'.index(matchobj.group(2).lower())
return int(round(number * multiplier))
def to_screen(self, *args, **kargs):
self.ydl.to_screen(*args, **kargs)
def to_stderr(self, message):
self.ydl.to_screen(message)
def to_console_title(self, message):
self.ydl.to_console_title(message)
def trouble(self, *args, **kargs):
self.ydl.trouble(*args, **kargs)
def report_warning(self, *args, **kargs):
self.ydl.report_warning(*args, **kargs)
def report_error(self, *args, **kargs):
self.ydl.report_error(*args, **kargs)
def slow_down(self, start_time, now, byte_counter):
"""Sleep if the download speed is over the rate limit."""
rate_limit = self.params.get('ratelimit')
if rate_limit is None or byte_counter == 0:
return
if now is None:
now = time.time()
elapsed = now - start_time
if elapsed <= 0.0:
return
speed = float(byte_counter) / elapsed
if speed > rate_limit:
time.sleep(max((byte_counter // rate_limit) - elapsed, 0))
def temp_name(self, filename):
"""Returns a temporary filename for the given filename."""
if self.params.get('nopart', False) or filename == '-' or \
(os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))):
return filename
return filename + '.part'
def undo_temp_name(self, filename):
if filename.endswith('.part'):
return filename[:-len('.part')]
return filename
def ytdl_filename(self, filename):
return filename + '.ytdl'
def try_rename(self, old_filename, new_filename):
try:
if old_filename == new_filename:
return
os.rename(encodeFilename(old_filename), encodeFilename(new_filename))
except (IOError, OSError) as err:
self.report_error('unable to rename file: %s' % error_to_compat_str(err))
def try_utime(self, filename, last_modified_hdr):
"""Try to set the last-modified time of the given file."""
if last_modified_hdr is None:
return
if not os.path.isfile(encodeFilename(filename)):
return
timestr = last_modified_hdr
if timestr is None:
return
filetime = timeconvert(timestr)
if filetime is None:
return filetime
# Ignore obviously invalid dates
if filetime == 0:
return
try:
os.utime(filename, (time.time(), filetime))
except Exception:
pass
return filetime
def report_destination(self, filename):
"""Report destination filename."""
self.to_screen('[download] Destination: ' + filename)
def _report_progress_status(self, msg, is_last_line=False):
fullmsg = '[download] ' + msg
if self.params.get('progress_with_newline', False):
self.to_screen(fullmsg)
else:
if compat_os_name == 'nt':
prev_len = getattr(self, '_report_progress_prev_line_length',
0)
if prev_len > len(fullmsg):
fullmsg += ' ' * (prev_len - len(fullmsg))
self._report_progress_prev_line_length = len(fullmsg)
clear_line = '\r'
else:
clear_line = ('\r\x1b[K' if sys.stderr.isatty() else '\r')
self.to_screen(clear_line + fullmsg, skip_eol=not is_last_line)
self.to_console_title('youtube-dl ' + msg)
def report_progress(self, s):
if s['status'] == 'finished':
if self.params.get('noprogress', False):
self.to_screen('[download] Download completed')
else:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
if s.get('elapsed') is not None:
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '100%% of %(_total_bytes_str)s in %(_elapsed_str)s'
else:
msg_template = '100%% of %(_total_bytes_str)s'
self._report_progress_status(
msg_template % s, is_last_line=True)
if self.params.get('noprogress'):
return
if s['status'] != 'downloading':
return
if s.get('eta') is not None:
s['_eta_str'] = self.format_eta(s['eta'])
else:
s['_eta_str'] = 'Unknown ETA'
if s.get('total_bytes') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes'])
elif s.get('total_bytes_estimate') and s.get('downloaded_bytes') is not None:
s['_percent_str'] = self.format_percent(100 * s['downloaded_bytes'] / s['total_bytes_estimate'])
else:
if s.get('downloaded_bytes') == 0:
s['_percent_str'] = self.format_percent(0)
else:
s['_percent_str'] = 'Unknown %'
if s.get('speed') is not None:
s['_speed_str'] = self.format_speed(s['speed'])
else:
s['_speed_str'] = 'Unknown speed'
if s.get('total_bytes') is not None:
s['_total_bytes_str'] = format_bytes(s['total_bytes'])
msg_template = '%(_percent_str)s of %(_total_bytes_str)s at %(_speed_str)s ETA %(_eta_str)s'
elif s.get('total_bytes_estimate') is not None:
s['_total_bytes_estimate_str'] = format_bytes(s['total_bytes_estimate'])
msg_template = '%(_percent_str)s of ~%(_total_bytes_estimate_str)s at %(_speed_str)s ETA %(_eta_str)s'
else:
if s.get('downloaded_bytes') is not None:
s['_downloaded_bytes_str'] = format_bytes(s['downloaded_bytes'])
if s.get('elapsed'):
s['_elapsed_str'] = self.format_seconds(s['elapsed'])
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s (%(_elapsed_str)s)'
else:
msg_template = '%(_downloaded_bytes_str)s at %(_speed_str)s'
else:
                msg_template = '%(_percent_str)s at %(_speed_str)s ETA %(_eta_str)s'
self._report_progress_status(msg_template % s)
def report_resuming_byte(self, resume_len):
"""Report attempt to resume at given byte."""
self.to_screen('[download] Resuming download at byte %s' % resume_len)
def report_retry(self, err, count, retries):
"""Report retry in case of HTTP error 5xx"""
self.to_screen(
'[download] Got server HTTP error: %s. Retrying (attempt %d of %s)...'
% (error_to_compat_str(err), count, self.format_retries(retries)))
def report_file_already_downloaded(self, file_name):
"""Report file has already been fully downloaded."""
try:
self.to_screen('[download] %s has already been downloaded' % file_name)
except UnicodeEncodeError:
self.to_screen('[download] The file has already been downloaded')
def report_unable_to_resume(self):
"""Report it was impossible to resume download."""
self.to_screen('[download] Unable to resume')
def download(self, filename, info_dict):
"""Download to a filename using the info from info_dict
Return True on success and False otherwise
"""
nooverwrites_and_exists = (
self.params.get('nooverwrites', False) and
os.path.exists(encodeFilename(filename))
)
if not hasattr(filename, 'write'):
continuedl_and_exists = (
self.params.get('continuedl', True) and
os.path.isfile(encodeFilename(filename)) and
not self.params.get('nopart', False)
)
# Check file already present
if filename != '-' and (nooverwrites_and_exists or continuedl_and_exists):
self.report_file_already_downloaded(filename)
self._hook_progress({
'filename': filename,
'status': 'finished',
'total_bytes': os.path.getsize(encodeFilename(filename)),
})
return True
min_sleep_interval = self.params.get('sleep_interval')
if min_sleep_interval:
max_sleep_interval = self.params.get('max_sleep_interval', min_sleep_interval)
sleep_interval = random.uniform(min_sleep_interval, max_sleep_interval)
self.to_screen(
'[download] Sleeping %s seconds...' % (
int(sleep_interval) if sleep_interval.is_integer()
else '%.2f' % sleep_interval))
time.sleep(sleep_interval)
return self.real_download(filename, info_dict)
def real_download(self, filename, info_dict):
"""Real download process. Redefine in subclasses."""
raise NotImplementedError('This method must be implemented by subclasses')
def _hook_progress(self, status):
for ph in self._progress_hooks:
ph(status)
def add_progress_hook(self, ph):
# See YoutubeDl.py (search for progress_hooks) for a description of
# this interface
self._progress_hooks.append(ph)
def _debug_cmd(self, args, exe=None):
if not self.params.get('verbose', False):
return
str_args = [decodeArgument(a) for a in args]
if exe is None:
exe = os.path.basename(str_args[0])
self.to_screen('[debug] %s command line: %s' % (
exe, shell_quote(str_args)))
| gpl-3.0 |
piffey/ansible | contrib/inventory/softlayer.py | 41 | 7172 | #!/usr/bin/env python
"""
SoftLayer external inventory script.
The SoftLayer Python API client is required. Use `pip install softlayer` to install it.
You have a few different options for configuring your username and api_key. You can pass
environment variables (SL_USERNAME and SL_API_KEY). You can also write INI file to
~/.softlayer or /etc/softlayer.conf. For more information see the SL API at:
- https://softlayer-python.readthedocs.org/en/latest/config_file.html
The SoftLayer Python client has a built in command for saving this configuration file
via the command `sl config setup`.
"""
# Copyright (C) 2014 AJ Bourg <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# I found the structure of the ec2.py script very helpful as an example
# as I put this together. Thanks to whoever wrote that script!
#
import SoftLayer
import re
import argparse
import itertools
try:
import json
except:
import simplejson as json
class SoftLayerInventory(object):
common_items = [
'id',
'globalIdentifier',
'hostname',
'domain',
'fullyQualifiedDomainName',
'primaryBackendIpAddress',
'primaryIpAddress',
'datacenter',
'tagReferences.tag.name',
'userData.value',
]
vs_items = [
'lastKnownPowerState.name',
'powerState',
'maxCpu',
'maxMemory',
'activeTransaction.transactionStatus[friendlyName,name]',
'status',
]
hw_items = [
'hardwareStatusId',
'processorPhysicalCoreAmount',
'memoryCapacity',
]
def _empty_inventory(self):
return {"_meta": {"hostvars": {}}}
def __init__(self):
'''Main path'''
self.inventory = self._empty_inventory()
self.parse_options()
if self.args.list:
self.get_all_servers()
print(self.json_format_dict(self.inventory, True))
elif self.args.host:
self.get_virtual_servers()
print(self.json_format_dict(self.inventory["_meta"]["hostvars"][self.args.host], True))
def to_safe(self, word):
'''Converts 'bad' characters in a string to underscores so they can be used as Ansible groups'''
return re.sub(r"[^A-Za-z0-9\-\.]", "_", word)
def push(self, my_dict, key, element):
'''Push an element onto an array that may not have been defined in the dict'''
if key in my_dict:
my_dict[key].append(element)
else:
my_dict[key] = [element]
def parse_options(self):
'''Parse all the arguments from the CLI'''
parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on SoftLayer')
parser.add_argument('--list', action='store_true', default=False,
help='List instances (default: False)')
parser.add_argument('--host', action='store',
help='Get all the variables about a specific instance')
self.args = parser.parse_args()
def json_format_dict(self, data, pretty=False):
'''Converts a dict to a JSON object and dumps it as a formatted string'''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data)
def process_instance(self, instance, instance_type="virtual"):
'''Populate the inventory dictionary with any instance information'''
# only want active instances
if 'status' in instance and instance['status']['name'] != 'Active':
return
# and powered on instances
if 'powerState' in instance and instance['powerState']['name'] != 'Running':
return
# 5 is active for hardware... see https://forums.softlayer.com/forum/softlayer-developer-network/general-discussion/2955-hardwarestatusid
if 'hardwareStatusId' in instance and instance['hardwareStatusId'] != 5:
return
# if there's no IP address, we can't reach it
if 'primaryIpAddress' not in instance:
return
instance['userData'] = instance['userData'][0]['value'] if instance['userData'] else ''
dest = instance['primaryIpAddress']
self.inventory["_meta"]["hostvars"][dest] = instance
# Inventory: group by memory
if 'maxMemory' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['maxMemory'])), dest)
elif 'memoryCapacity' in instance:
self.push(self.inventory, self.to_safe('memory_' + str(instance['memoryCapacity'])), dest)
# Inventory: group by cpu count
if 'maxCpu' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['maxCpu'])), dest)
elif 'processorPhysicalCoreAmount' in instance:
self.push(self.inventory, self.to_safe('cpu_' + str(instance['processorPhysicalCoreAmount'])), dest)
# Inventory: group by datacenter
self.push(self.inventory, self.to_safe('datacenter_' + instance['datacenter']['name']), dest)
# Inventory: group by hostname
self.push(self.inventory, self.to_safe(instance['hostname']), dest)
# Inventory: group by FQDN
self.push(self.inventory, self.to_safe(instance['fullyQualifiedDomainName']), dest)
# Inventory: group by domain
self.push(self.inventory, self.to_safe(instance['domain']), dest)
# Inventory: group by type (hardware/virtual)
self.push(self.inventory, instance_type, dest)
# Inventory: group by tag
for tag in instance['tagReferences']:
self.push(self.inventory, tag['tag']['name'], dest)
def get_virtual_servers(self):
'''Get all the CCI instances'''
vs = SoftLayer.VSManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.vs_items))
instances = vs.list_instances(mask=mask)
for instance in instances:
self.process_instance(instance)
def get_physical_servers(self):
'''Get all the hardware instances'''
hw = SoftLayer.HardwareManager(self.client)
mask = "mask[%s]" % ','.join(itertools.chain(self.common_items, self.hw_items))
instances = hw.list_hardware(mask=mask)
for instance in instances:
self.process_instance(instance, 'hardware')
def get_all_servers(self):
self.client = SoftLayer.Client()
self.get_virtual_servers()
self.get_physical_servers()
SoftLayerInventory()
| gpl-3.0 |
mrquim/mrquimrepo | script.module.exodus/lib/resources/lib/sources/de/streamdream.py | 5 | 4468 | # -*- coding: utf-8 -*-
"""
Exodus Add-on
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urllib
import urlparse
from resources.lib.modules import client
from resources.lib.modules import dom_parser
from resources.lib.modules import source_utils
class source:
def __init__(self):
self.priority = 1
self.language = ['de']
self.domains = ['streamdream.ws']
self.base_link = 'http://streamdream.ws'
self.search_link = '/searchy.php?ser=%s'
self.hoster_link = '/episodeholen2.php'
def movie(self, imdb, title, localtitle, aliases, year):
try:
imdb = re.sub('[^0-9]', '', imdb)
url = self.__search(imdb)
return urllib.urlencode({'url': url, 'imdb': imdb}) if url else None
except:
return
def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
try:
imdb = re.sub('[^0-9]', '', imdb)
url = self.__search(imdb)
return urllib.urlencode({'url': url, 'imdb': imdb}) if url else None
except:
return
def episode(self, url, imdb, tvdb, title, premiered, season, episode):
try:
if not url:
return
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
data.update({'season': season, 'episode': episode})
return urllib.urlencode(data)
except:
return
def sources(self, url, hostDict, hostprDict):
sources = []
try:
if not url:
return sources
data = urlparse.parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
url = urlparse.urljoin(self.base_link, data.get('url'))
season = data.get('season')
episode = data.get('episode')
if season and episode:
r = urllib.urlencode({'imdbid': data['imdb'], 'language': 'de', 'season': season, 'episode': episode})
r = client.request(urlparse.urljoin(self.base_link, self.hoster_link), XHR=True, post=r)
else:
r = client.request(url)
r = dom_parser.parse_dom(r, 'div', attrs={'class': 'linkbox'})[0].content
r = re.compile('(<a.+?/a>)', re.DOTALL).findall(r)
r = [(dom_parser.parse_dom(i, 'a', req='href'), dom_parser.parse_dom(i, 'img', attrs={'class': re.compile('.*linkbutton')}, req='class')) for i in r]
r = [(i[0][0].attrs['href'], i[1][0].attrs['class'].lower()) for i in r if i[0] and i[1]]
r = [(i[0].strip(), 'HD' if i[1].startswith('hd') else 'SD') for i in r]
            for url, quality in r:
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue
                sources.append({'source': host, 'quality': quality, 'language': 'de', 'url': url, 'direct': False, 'debridonly': False})
return sources
except:
return sources
def resolve(self, url):
return url
def __search(self, imdb):
try:
r = client.request(urlparse.urljoin(self.base_link, self.search_link % imdb))
r = dom_parser.parse_dom(r, 'a', req='href')
r = [i.attrs['href'] for i in r if i]
if len(r) > 1:
for i in r:
data = client.request(urlparse.urljoin(self.base_link, i))
data = re.compile('(imdbid\s*[=|:]\s*"%s"\s*,)' % imdb, re.DOTALL).findall(data)
if len(data) >= 1:
url = i
else:
url = r[0]
if url:
return source_utils.strip_domain(url)
except:
return
| gpl-2.0 |
Tufin/pytos | pytos/securechange/xml_objects/restapi/step/access_request/analysisresult.py | 1 | 1035 |
from pytos.securechange.xml_objects.restapi.step.initialize import *
class Analysis_Result(XML_Object_Base):
IMPLEMENTED = "implemented"
NOT_AVAILABLE = "not available"
NOT_IMPLEMENTED = "not implemented"
NOT_RUN = "not run"
VERIFIED = "verified"
def __init__(self, xml_tag, status):
self.status = status
super().__init__(xml_tag)
def is_not_run(self):
if self.status == Analysis_Result.NOT_RUN:
return True
else:
return False
def is_not_available(self):
if self.status == Analysis_Result.NOT_AVAILABLE:
return True
else:
return False
@classmethod
def from_xml_node(cls, xml_node):
"""
Initialize the object from a XML node.
:param xml_node: The XML node from which all necessary parameters will be parsed.
:type xml_node: xml.etree.Element
"""
raise NotImplementedError(
"from_xml_node must be implemented by derived classes.") | apache-2.0 |
QijunPan/ansible | lib/ansible/utils/module_docs_fragments/aws.py | 14 | 3154 | # (c) 2014, Will Thames <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
class ModuleDocFragment(object):
# AWS only documentation fragment
DOCUMENTATION = """
options:
ec2_url:
description:
- Url to use to connect to EC2 or your Eucalyptus cloud (by default the module will use EC2 endpoints). Ignored for modules where region is required. Must be specified for all other modules if region is not used. If not set then the value of the EC2_URL environment variable, if any, is used.
required: false
default: null
aliases: []
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_ACCESS_KEY, AWS_SECRET_KEY, or EC2_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY_ID, AWS_ACCESS_KEY or EC2_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
security_token:
description:
- AWS STS security token. If not set then the value of the AWS_SECURITY_TOKEN or EC2_SECURITY_TOKEN environment variable is used.
required: false
default: null
aliases: [ 'access_token' ]
version_added: "1.6"
validate_certs:
description:
- When set to "no", SSL certificates will not be validated for boto versions >= 2.6.0.
required: false
default: "yes"
choices: ["yes", "no"]
aliases: []
version_added: "1.5"
profile:
description:
- Uses a boto profile. Only works with boto >= 2.24.0.
required: false
default: null
aliases: []
version_added: "1.6"
requirements:
- "python >= 2.6"
- boto
notes:
- If parameters are not set within the module, the following
environment variables can be used in decreasing order of precedence
C(AWS_URL) or C(EC2_URL),
C(AWS_ACCESS_KEY_ID) or C(AWS_ACCESS_KEY) or C(EC2_ACCESS_KEY),
C(AWS_SECRET_ACCESS_KEY) or C(AWS_SECRET_KEY) or C(EC2_SECRET_KEY),
C(AWS_SECURITY_TOKEN) or C(EC2_SECURITY_TOKEN),
C(AWS_REGION) or C(EC2_REGION)
- Ansible uses the boto configuration file (typically ~/.boto) if no
credentials are provided. See http://boto.readthedocs.org/en/latest/boto_config_tut.html
- C(AWS_REGION) or C(EC2_REGION) can be typically be used to specify the
AWS region, when required, but this can also be configured in the boto config file
"""
| gpl-3.0 |
ehomeshasha/easydata | pdf/forms.py | 1 | 1391 | from __future__ import unicode_literals
from easydata.constant import CONTENT_TYPE
from django import forms
from django.utils.translation import ugettext_lazy as _
class PDFUploadForm(forms.Form):
title = forms.CharField(
label=_("Title"),
min_length=2,
max_length=100,
widget=forms.TextInput(),
required=True
)
description = forms.CharField(
label=_("Description"),
widget=forms.Textarea(),
required=True
)
store_file = forms.FileField(
label=_("PDF file"),
widget=forms.FileInput(),
required=True
)
def clean_store_file(self):
store_file = self.cleaned_data.get('store_file')
if store_file._size > 150*1024*1024:
raise forms.ValidationError(_("file too large ( > 150mb )"))
if store_file.content_type != CONTENT_TYPE['pdf']:
raise forms.ValidationError(_("invalid file type, must be pdf"))
end_pos = store_file._name.rfind(".")
ext = store_file._name[end_pos+1:]
if ext != 'pdf':
raise forms.ValidationError(_("invalid file suffix, must be .pdf"))
class PDFCommentForm(forms.Form):
title = forms.CharField(
label=_("Title"),
min_length=2,
max_length=100,
widget=forms.TextInput(),
required=True
)
| mit |
mancoast/CPythonPyc_test | cpython/240_test_itertools.py | 10 | 32935 | import unittest
from test import test_support
from itertools import *
from weakref import proxy
import sys
import operator
import random
def onearg(x):
'Test function of one argument'
return 2*x
def errfunc(*args):
'Test function that raises an error'
raise ValueError
def gen3():
'Non-restartable source sequence'
for i in (0, 1, 2):
yield i
def isEven(x):
'Test predicate'
return x%2==0
def isOdd(x):
'Test predicate'
return x%2==1
class StopNow:
'Class emulating an empty iterable.'
def __iter__(self):
return self
def next(self):
raise StopIteration
def take(n, seq):
    'Convenience function for partially consuming a long or infinite iterable'
return list(islice(seq, n))
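# For example, take(3, count()) == [0, 1, 2] and take(2, 'abcdef') == ['a', 'b'].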
class TestBasicOps(unittest.TestCase):
def test_chain(self):
self.assertEqual(list(chain('abc', 'def')), list('abcdef'))
self.assertEqual(list(chain('abc')), list('abc'))
self.assertEqual(list(chain('')), [])
self.assertEqual(take(4, chain('abc', 'def')), list('abcd'))
self.assertRaises(TypeError, chain, 2, 3)
def test_count(self):
self.assertEqual(zip('abc',count()), [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(zip('abc',count(3)), [('a', 3), ('b', 4), ('c', 5)])
self.assertEqual(take(2, zip('abc',count(3))), [('a', 3), ('b', 4)])
self.assertRaises(TypeError, count, 2, 3)
self.assertRaises(TypeError, count, 'a')
c = count(sys.maxint-2) # verify that rollover doesn't crash
c.next(); c.next(); c.next(); c.next(); c.next()
c = count(3)
self.assertEqual(repr(c), 'count(3)')
c.next()
self.assertEqual(repr(c), 'count(4)')
def test_cycle(self):
self.assertEqual(take(10, cycle('abc')), list('abcabcabca'))
self.assertEqual(list(cycle('')), [])
self.assertRaises(TypeError, cycle)
self.assertRaises(TypeError, cycle, 5)
self.assertEqual(list(islice(cycle(gen3()),10)), [0,1,2,0,1,2,0,1,2,0])
def test_groupby(self):
# Check whether it accepts arguments correctly
self.assertEqual([], list(groupby([])))
self.assertEqual([], list(groupby([], key=id)))
self.assertRaises(TypeError, list, groupby('abc', []))
self.assertRaises(TypeError, groupby, None)
self.assertRaises(TypeError, groupby, 'abc', lambda x:x, 10)
# Check normal input
s = [(0, 10, 20), (0, 11,21), (0,12,21), (1,13,21), (1,14,22),
(2,15,22), (3,16,23), (3,17,23)]
dup = []
for k, g in groupby(s, lambda r:r[0]):
for elem in g:
self.assertEqual(k, elem[0])
dup.append(elem)
self.assertEqual(s, dup)
# Check nested case
dup = []
for k, g in groupby(s, lambda r:r[0]):
for ik, ig in groupby(g, lambda r:r[2]):
for elem in ig:
self.assertEqual(k, elem[0])
self.assertEqual(ik, elem[2])
dup.append(elem)
self.assertEqual(s, dup)
# Check case where inner iterator is not used
keys = [k for k, g in groupby(s, lambda r:r[0])]
expectedkeys = set([r[0] for r in s])
self.assertEqual(set(keys), expectedkeys)
self.assertEqual(len(keys), len(expectedkeys))
# Exercise pipes and filters style
s = 'abracadabra'
# sort s | uniq
r = [k for k, g in groupby(sorted(s))]
self.assertEqual(r, ['a', 'b', 'c', 'd', 'r'])
# sort s | uniq -d
r = [k for k, g in groupby(sorted(s)) if list(islice(g,1,2))]
self.assertEqual(r, ['a', 'b', 'r'])
# sort s | uniq -c
r = [(len(list(g)), k) for k, g in groupby(sorted(s))]
self.assertEqual(r, [(5, 'a'), (2, 'b'), (1, 'c'), (1, 'd'), (2, 'r')])
# sort s | uniq -c | sort -rn | head -3
r = sorted([(len(list(g)) , k) for k, g in groupby(sorted(s))], reverse=True)[:3]
self.assertEqual(r, [(5, 'a'), (2, 'r'), (2, 'b')])
# iter.next failure
class ExpectedError(Exception):
pass
def delayed_raise(n=0):
for i in range(n):
yield 'yo'
raise ExpectedError
def gulp(iterable, keyp=None, func=list):
return [func(g) for k, g in groupby(iterable, keyp)]
# iter.next failure on outer object
self.assertRaises(ExpectedError, gulp, delayed_raise(0))
# iter.next failure on inner object
self.assertRaises(ExpectedError, gulp, delayed_raise(1))
# __cmp__ failure
class DummyCmp:
def __cmp__(self, dst):
raise ExpectedError
s = [DummyCmp(), DummyCmp(), None]
# __cmp__ failure on outer object
self.assertRaises(ExpectedError, gulp, s, func=id)
# __cmp__ failure on inner object
self.assertRaises(ExpectedError, gulp, s)
# keyfunc failure
def keyfunc(obj):
if keyfunc.skip > 0:
keyfunc.skip -= 1
return obj
else:
raise ExpectedError
# keyfunc failure on outer object
keyfunc.skip = 0
self.assertRaises(ExpectedError, gulp, [None], keyfunc)
keyfunc.skip = 1
self.assertRaises(ExpectedError, gulp, [None, None], keyfunc)
def test_ifilter(self):
self.assertEqual(list(ifilter(isEven, range(6))), [0,2,4])
self.assertEqual(list(ifilter(None, [0,1,0,2,0])), [1,2])
self.assertEqual(take(4, ifilter(isEven, count())), [0,2,4,6])
self.assertRaises(TypeError, ifilter)
self.assertRaises(TypeError, ifilter, lambda x:x)
self.assertRaises(TypeError, ifilter, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilter, isEven, 3)
self.assertRaises(TypeError, ifilter(range(6), range(6)).next)
def test_ifilterfalse(self):
self.assertEqual(list(ifilterfalse(isEven, range(6))), [1,3,5])
self.assertEqual(list(ifilterfalse(None, [0,1,0,2,0])), [0,0,0])
self.assertEqual(take(4, ifilterfalse(isEven, count())), [1,3,5,7])
self.assertRaises(TypeError, ifilterfalse)
self.assertRaises(TypeError, ifilterfalse, lambda x:x)
self.assertRaises(TypeError, ifilterfalse, lambda x:x, range(6), 7)
self.assertRaises(TypeError, ifilterfalse, isEven, 3)
self.assertRaises(TypeError, ifilterfalse(range(6), range(6)).next)
def test_izip(self):
ans = [(x,y) for x, y in izip('abc',count())]
self.assertEqual(ans, [('a', 0), ('b', 1), ('c', 2)])
self.assertEqual(list(izip('abc', range(6))), zip('abc', range(6)))
self.assertEqual(list(izip('abcdef', range(3))), zip('abcdef', range(3)))
self.assertEqual(take(3,izip('abcdef', count())), zip('abcdef', range(3)))
self.assertEqual(list(izip('abcdef')), zip('abcdef'))
self.assertEqual(list(izip()), zip())
self.assertRaises(TypeError, izip, 3)
self.assertRaises(TypeError, izip, range(3), 3)
# Check tuple re-use (implementation detail)
self.assertEqual([tuple(list(pair)) for pair in izip('abc', 'def')],
zip('abc', 'def'))
self.assertEqual([pair for pair in izip('abc', 'def')],
zip('abc', 'def'))
ids = map(id, izip('abc', 'def'))
self.assertEqual(min(ids), max(ids))
ids = map(id, list(izip('abc', 'def')))
self.assertEqual(len(dict.fromkeys(ids)), len(ids))
def test_repeat(self):
self.assertEqual(zip(xrange(3),repeat('a')),
[(0, 'a'), (1, 'a'), (2, 'a')])
self.assertEqual(list(repeat('a', 3)), ['a', 'a', 'a'])
self.assertEqual(take(3, repeat('a')), ['a', 'a', 'a'])
self.assertEqual(list(repeat('a', 0)), [])
self.assertEqual(list(repeat('a', -3)), [])
self.assertRaises(TypeError, repeat)
self.assertRaises(TypeError, repeat, None, 3, 4)
self.assertRaises(TypeError, repeat, None, 'a')
r = repeat(1+0j)
self.assertEqual(repr(r), 'repeat((1+0j))')
r = repeat(1+0j, 5)
self.assertEqual(repr(r), 'repeat((1+0j), 5)')
list(r)
self.assertEqual(repr(r), 'repeat((1+0j), 0)')
def test_imap(self):
self.assertEqual(list(imap(operator.pow, range(3), range(1,7))),
[0**1, 1**2, 2**3])
self.assertEqual(list(imap(None, 'abc', range(5))),
[('a',0),('b',1),('c',2)])
self.assertEqual(list(imap(None, 'abc', count())),
[('a',0),('b',1),('c',2)])
self.assertEqual(take(2,imap(None, 'abc', count())),
[('a',0),('b',1)])
self.assertEqual(list(imap(operator.pow, [])), [])
self.assertRaises(TypeError, imap)
self.assertRaises(TypeError, imap, operator.neg)
self.assertRaises(TypeError, imap(10, range(5)).next)
self.assertRaises(ValueError, imap(errfunc, [4], [5]).next)
self.assertRaises(TypeError, imap(onearg, [4], [5]).next)
def test_starmap(self):
self.assertEqual(list(starmap(operator.pow, zip(range(3), range(1,7)))),
[0**1, 1**2, 2**3])
self.assertEqual(take(3, starmap(operator.pow, izip(count(), count(1)))),
[0**1, 1**2, 2**3])
self.assertEqual(list(starmap(operator.pow, [])), [])
self.assertRaises(TypeError, list, starmap(operator.pow, [[4,5]]))
self.assertRaises(TypeError, starmap)
self.assertRaises(TypeError, starmap, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, starmap(10, [(4,5)]).next)
self.assertRaises(ValueError, starmap(errfunc, [(4,5)]).next)
self.assertRaises(TypeError, starmap(onearg, [(4,5)]).next)
def test_islice(self):
for args in [ # islice(args) should agree with range(args)
(10, 20, 3),
(10, 3, 20),
(10, 20),
(10, 3),
(20,)
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*args))
for args, tgtargs in [ # Stop when seqn is exhausted
((10, 110, 3), ((10, 100, 3))),
((10, 110), ((10, 100))),
((110,), (100,))
]:
self.assertEqual(list(islice(xrange(100), *args)), range(*tgtargs))
# Test stop=None
self.assertEqual(list(islice(xrange(10), None)), range(10))
self.assertEqual(list(islice(xrange(10), 2, None)), range(2, 10))
self.assertEqual(list(islice(xrange(10), 1, None, 2)), range(1, 10, 2))
# Test invalid arguments
self.assertRaises(TypeError, islice, xrange(10))
self.assertRaises(TypeError, islice, xrange(10), 1, 2, 3, 4)
self.assertRaises(ValueError, islice, xrange(10), -5, 10, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, -5, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, -1)
self.assertRaises(ValueError, islice, xrange(10), 1, 10, 0)
self.assertRaises(ValueError, islice, xrange(10), 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a')
self.assertRaises(ValueError, islice, xrange(10), 'a', 1, 1)
self.assertRaises(ValueError, islice, xrange(10), 1, 'a', 1)
self.assertEqual(len(list(islice(count(), 1, 10, sys.maxint))), 1)
def test_takewhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(takewhile(underten, data)), [1, 3, 5])
self.assertEqual(list(takewhile(underten, [])), [])
self.assertRaises(TypeError, takewhile)
self.assertRaises(TypeError, takewhile, operator.pow)
self.assertRaises(TypeError, takewhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, takewhile(10, [(4,5)]).next)
self.assertRaises(ValueError, takewhile(errfunc, [(4,5)]).next)
t = takewhile(bool, [1, 1, 1, 0, 0, 0])
self.assertEqual(list(t), [1, 1, 1])
self.assertRaises(StopIteration, t.next)
def test_dropwhile(self):
data = [1, 3, 5, 20, 2, 4, 6, 8]
underten = lambda x: x<10
self.assertEqual(list(dropwhile(underten, data)), [20, 2, 4, 6, 8])
self.assertEqual(list(dropwhile(underten, [])), [])
self.assertRaises(TypeError, dropwhile)
self.assertRaises(TypeError, dropwhile, operator.pow)
self.assertRaises(TypeError, dropwhile, operator.pow, [(4,5)], 'extra')
self.assertRaises(TypeError, dropwhile(10, [(4,5)]).next)
self.assertRaises(ValueError, dropwhile(errfunc, [(4,5)]).next)
def test_tee(self):
n = 200
def irange(n):
for i in xrange(n):
yield i
a, b = tee([]) # test empty iterator
self.assertEqual(list(a), [])
self.assertEqual(list(b), [])
a, b = tee(irange(n)) # test 100% interleaved
self.assertEqual(zip(a,b), zip(range(n),range(n)))
a, b = tee(irange(n)) # test 0% interleaved
self.assertEqual(list(a), range(n))
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of leading iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del a
self.assertEqual(list(b), range(n))
a, b = tee(irange(n)) # test dealloc of trailing iterator
for i in xrange(100):
self.assertEqual(a.next(), i)
del b
self.assertEqual(list(a), range(100, n))
for j in xrange(5): # test randomly interleaved
order = [0]*n + [1]*n
random.shuffle(order)
lists = ([], [])
its = tee(irange(n))
for i in order:
value = its[i].next()
lists[i].append(value)
self.assertEqual(lists[0], range(n))
self.assertEqual(lists[1], range(n))
# test argument format checking
self.assertRaises(TypeError, tee)
self.assertRaises(TypeError, tee, 3)
self.assertRaises(TypeError, tee, [1,2], 'x')
self.assertRaises(TypeError, tee, [1,2], 3, 'x')
# tee object should be instantiable
a, b = tee('abc')
c = type(a)('def')
self.assertEqual(list(c), list('def'))
# test long-lagged and multi-way split
a, b, c = tee(xrange(2000), 3)
for i in xrange(100):
self.assertEqual(a.next(), i)
self.assertEqual(list(b), range(2000))
self.assertEqual([c.next(), c.next()], range(2))
self.assertEqual(list(a), range(100,2000))
self.assertEqual(list(c), range(2,2000))
# test values of n
self.assertRaises(TypeError, tee, 'abc', 'invalid')
for n in xrange(5):
result = tee('abc', n)
self.assertEqual(type(result), tuple)
self.assertEqual(len(result), n)
self.assertEqual(map(list, result), [list('abc')]*n)
# tee pass-through to copyable iterator
a, b = tee('abc')
c, d = tee(a)
self.assert_(a is c)
# test tee_new
t1, t2 = tee('abc')
tnew = type(t1)
self.assertRaises(TypeError, tnew)
self.assertRaises(TypeError, tnew, 10)
t3 = tnew(t1)
self.assert_(list(t1) == list(t2) == list(t3) == list('abc'))
# test that tee objects are weak referencable
a, b = tee(xrange(10))
p = proxy(a)
self.assertEqual(getattr(p, '__class__'), type(b))
del a
self.assertRaises(ReferenceError, getattr, p, '__class__')
def test_StopIteration(self):
self.assertRaises(StopIteration, izip().next)
for f in (chain, cycle, izip, groupby):
self.assertRaises(StopIteration, f([]).next)
self.assertRaises(StopIteration, f(StopNow()).next)
self.assertRaises(StopIteration, islice([], None).next)
self.assertRaises(StopIteration, islice(StopNow(), None).next)
p, q = tee([])
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
p, q = tee(StopNow())
self.assertRaises(StopIteration, p.next)
self.assertRaises(StopIteration, q.next)
self.assertRaises(StopIteration, repeat(None, 0).next)
for f in (ifilter, ifilterfalse, imap, takewhile, dropwhile, starmap):
self.assertRaises(StopIteration, f(lambda x:x, []).next)
self.assertRaises(StopIteration, f(lambda x:x, StopNow()).next)
class TestGC(unittest.TestCase):
def makecycle(self, iterator, container):
container.append(iterator)
iterator.next()
del container, iterator
def test_chain(self):
a = []
self.makecycle(chain(a), a)
def test_cycle(self):
a = []
self.makecycle(cycle([a]*2), a)
def test_dropwhile(self):
a = []
self.makecycle(dropwhile(bool, [0, a, a]), a)
def test_groupby(self):
a = []
self.makecycle(groupby([a]*2, lambda x:x), a)
def test_ifilter(self):
a = []
self.makecycle(ifilter(lambda x:True, [a]*2), a)
def test_ifilterfalse(self):
a = []
self.makecycle(ifilterfalse(lambda x:False, a), a)
def test_izip(self):
a = []
self.makecycle(izip([a]*2, [a]*3), a)
def test_imap(self):
a = []
self.makecycle(imap(lambda x:x, [a]*2), a)
def test_islice(self):
a = []
self.makecycle(islice([a]*2, None), a)
def test_repeat(self):
a = []
self.makecycle(repeat(a), a)
def test_starmap(self):
a = []
self.makecycle(starmap(lambda *t: t, [(a,a)]*2), a)
def test_takewhile(self):
a = []
self.makecycle(takewhile(bool, [1, 0, a, a]), a)
def R(seqn):
'Regular generator'
for i in seqn:
yield i
class G:
'Sequence using __getitem__'
def __init__(self, seqn):
self.seqn = seqn
def __getitem__(self, i):
return self.seqn[i]
class I:
'Sequence using iterator protocol'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class Ig:
'Sequence using iterator protocol defined with a generator'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
for val in self.seqn:
yield val
class X:
'Missing __getitem__ and __iter__'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def next(self):
if self.i >= len(self.seqn): raise StopIteration
v = self.seqn[self.i]
self.i += 1
return v
class N:
'Iterator missing next()'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
class E:
'Test propagation of exceptions'
def __init__(self, seqn):
self.seqn = seqn
self.i = 0
def __iter__(self):
return self
def next(self):
3 // 0
class S:
'Test immediate stop'
def __init__(self, seqn):
pass
def __iter__(self):
return self
def next(self):
raise StopIteration
def L(seqn):
'Test multiple tiers of iterators'
return chain(imap(lambda x:x, R(Ig(G(seqn)))))
class TestVariousIteratorArgs(unittest.TestCase):
def test_chain(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(chain(g(s))), list(g(s)))
self.assertEqual(list(chain(g(s), g(s))), list(g(s))+list(g(s)))
self.assertRaises(TypeError, chain, X(s))
self.assertRaises(TypeError, list, chain(N(s)))
self.assertRaises(ZeroDivisionError, list, chain(E(s)))
def test_cycle(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgtlen = len(s) * 3
expected = list(g(s))*3
actual = list(islice(cycle(g(s)), tgtlen))
self.assertEqual(actual, expected)
self.assertRaises(TypeError, cycle, X(s))
self.assertRaises(TypeError, list, cycle(N(s)))
self.assertRaises(ZeroDivisionError, list, cycle(E(s)))
def test_groupby(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual([k for k, sb in groupby(g(s))], list(g(s)))
self.assertRaises(TypeError, groupby, X(s))
self.assertRaises(TypeError, list, groupby(N(s)))
self.assertRaises(ZeroDivisionError, list, groupby(E(s)))
def test_ifilter(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilter(isEven, g(s))), filter(isEven, g(s)))
self.assertRaises(TypeError, ifilter, isEven, X(s))
self.assertRaises(TypeError, list, ifilter(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilter(isEven, E(s)))
def test_ifilterfalse(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(ifilterfalse(isEven, g(s))), filter(isOdd, g(s)))
self.assertRaises(TypeError, ifilterfalse, isEven, X(s))
self.assertRaises(TypeError, list, ifilterfalse(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, ifilterfalse(isEven, E(s)))
def test_izip(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(izip(g(s))), zip(g(s)))
self.assertEqual(list(izip(g(s), g(s))), zip(g(s), g(s)))
self.assertRaises(TypeError, izip, X(s))
self.assertRaises(TypeError, list, izip(N(s)))
self.assertRaises(ZeroDivisionError, list, izip(E(s)))
def test_imap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(imap(onearg, g(s))), map(onearg, g(s)))
self.assertEqual(list(imap(operator.pow, g(s), g(s))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, imap, onearg, X(s))
self.assertRaises(TypeError, list, imap(onearg, N(s)))
self.assertRaises(ZeroDivisionError, list, imap(onearg, E(s)))
def test_islice(self):
for s in ("12345", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
self.assertEqual(list(islice(g(s),1,None,2)), list(g(s))[1::2])
self.assertRaises(TypeError, islice, X(s), 10)
self.assertRaises(TypeError, list, islice(N(s), 10))
self.assertRaises(ZeroDivisionError, list, islice(E(s), 10))
def test_starmap(self):
for s in (range(10), range(0), range(100), (7,11), xrange(20,50,5)):
for g in (G, I, Ig, S, L, R):
ss = zip(s, s)
self.assertEqual(list(starmap(operator.pow, g(ss))), map(operator.pow, g(s), g(s)))
self.assertRaises(TypeError, starmap, operator.pow, X(ss))
self.assertRaises(TypeError, list, starmap(operator.pow, N(ss)))
self.assertRaises(ZeroDivisionError, list, starmap(operator.pow, E(ss)))
def test_takewhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not isEven(elem): break
tgt.append(elem)
self.assertEqual(list(takewhile(isEven, g(s))), tgt)
self.assertRaises(TypeError, takewhile, isEven, X(s))
self.assertRaises(TypeError, list, takewhile(isEven, N(s)))
self.assertRaises(ZeroDivisionError, list, takewhile(isEven, E(s)))
def test_dropwhile(self):
for s in (range(10), range(0), range(1000), (7,11), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
tgt = []
for elem in g(s):
if not tgt and isOdd(elem): continue
tgt.append(elem)
self.assertEqual(list(dropwhile(isOdd, g(s))), tgt)
self.assertRaises(TypeError, dropwhile, isOdd, X(s))
self.assertRaises(TypeError, list, dropwhile(isOdd, N(s)))
self.assertRaises(ZeroDivisionError, list, dropwhile(isOdd, E(s)))
def test_tee(self):
for s in ("123", "", range(1000), ('do', 1.2), xrange(2000,2200,5)):
for g in (G, I, Ig, S, L, R):
it1, it2 = tee(g(s))
self.assertEqual(list(it1), list(g(s)))
self.assertEqual(list(it2), list(g(s)))
self.assertRaises(TypeError, tee, X(s))
self.assertRaises(TypeError, list, tee(N(s))[0])
self.assertRaises(ZeroDivisionError, list, tee(E(s))[0])
class LengthTransparency(unittest.TestCase):
def test_repeat(self):
self.assertEqual(len(repeat(None, 50)), 50)
self.assertRaises(TypeError, len, repeat(None))
class RegressionTests(unittest.TestCase):
def test_sf_793826(self):
# Fix Armin Rigo's successful efforts to wreak havoc
def mutatingtuple(tuple1, f, tuple2):
# this builds a tuple t which is a copy of tuple1,
# then calls f(t), then mutates t to be equal to tuple2
# (needs len(tuple1) == len(tuple2)).
def g(value, first=[1]):
if first:
del first[:]
f(z.next())
return value
items = list(tuple2)
items[1:1] = list(tuple1)
gen = imap(g, items)
z = izip(*[gen]*len(tuple1))
z.next()
def f(t):
global T
T = t
first[:] = list(T)
first = []
mutatingtuple((1,2,3), f, (4,5,6))
second = list(T)
self.assertEqual(first, second)
def test_sf_950057(self):
# Make sure that chain() and cycle() catch exceptions immediately
# rather than when shifting between input sources
def gen1():
hist.append(0)
yield 1
hist.append(1)
raise AssertionError
hist.append(2)
def gen2(x):
hist.append(3)
yield 2
hist.append(4)
if x:
raise StopIteration
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(False)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, chain(gen1(), gen2(True)))
self.assertEqual(hist, [0,1])
hist = []
self.assertRaises(AssertionError, list, cycle(gen1()))
self.assertEqual(hist, [0,1])
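# A minimal sketch of what test_sf_950057 guards against (hypothetical
# session, not executed): the AssertionError from the first generator must
# surface as soon as it is raised, and chain() must not fall through to the
# second iterable -- which is why hist above never records gen2's entries.
#
#     >>> list(chain(gen1(), gen2(False)))
#     Traceback (most recent call last):
#         ...
#     AssertionError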
libreftest = """ Doctest for examples in the library reference: libitertools.tex
>>> amounts = [120.15, 764.05, 823.14]
>>> for checknum, amount in izip(count(1200), amounts):
... print 'Check %d is for $%.2f' % (checknum, amount)
...
Check 1200 is for $120.15
Check 1201 is for $764.05
Check 1202 is for $823.14
>>> import operator
>>> for cube in imap(operator.pow, xrange(1,4), repeat(3)):
... print cube
...
1
8
27
>>> reportlines = ['EuroPython', 'Roster', '', 'alex', '', 'laura', '', 'martin', '', 'walter', '', 'samuele']
>>> for name in islice(reportlines, 3, None, 2):
... print name.title()
...
Alex
Laura
Martin
Walter
Samuele
>>> from operator import itemgetter
>>> d = dict(a=1, b=2, c=1, d=2, e=1, f=2, g=3)
>>> di = sorted(d.iteritems(), key=itemgetter(1))
>>> for k, g in groupby(di, itemgetter(1)):
... print k, map(itemgetter(0), g)
...
1 ['a', 'c', 'e']
2 ['b', 'd', 'f']
3 ['g']
# Find runs of consecutive numbers using groupby. The key to the solution
# is differencing with a range so that consecutive numbers all appear in
# the same group.
>>> data = [ 1, 4,5,6, 10, 15,16,17,18, 22, 25,26,27,28]
>>> for k, g in groupby(enumerate(data), lambda (i,x):i-x):
... print map(operator.itemgetter(1), g)
...
[1]
[4, 5, 6]
[10]
[15, 16, 17, 18]
[22]
[25, 26, 27, 28]
>>> def take(n, seq):
... return list(islice(seq, n))
>>> def enumerate(iterable):
... return izip(count(), iterable)
>>> def tabulate(function):
... "Return function(0), function(1), ..."
... return imap(function, count())
>>> def iteritems(mapping):
... return izip(mapping.iterkeys(), mapping.itervalues())
>>> def nth(iterable, n):
... "Returns the nth item"
... return list(islice(iterable, n, n+1))
>>> def all(seq, pred=bool):
... "Returns True if pred(x) is True for every element in the iterable"
... for elem in ifilterfalse(pred, seq):
... return False
... return True
>>> def any(seq, pred=bool):
... "Returns True if pred(x) is True for at least one element in the iterable"
... for elem in ifilter(pred, seq):
... return True
... return False
>>> def no(seq, pred=bool):
... "Returns True if pred(x) is False for every element in the iterable"
... for elem in ifilter(pred, seq):
... return False
... return True
>>> def quantify(seq, pred=bool):
... "Count how many times the predicate is True in the sequence"
... return sum(imap(pred, seq))
>>> def padnone(seq):
... "Returns the sequence elements and then returns None indefinitely"
... return chain(seq, repeat(None))
>>> def ncycles(seq, n):
... "Returns the sequence elements n times"
... return chain(*repeat(seq, n))
>>> def dotproduct(vec1, vec2):
... return sum(imap(operator.mul, vec1, vec2))
>>> def flatten(listOfLists):
... return list(chain(*listOfLists))
>>> def repeatfunc(func, times=None, *args):
... "Repeat calls to func with specified arguments."
... " Example: repeatfunc(random.random)"
... if times is None:
... return starmap(func, repeat(args))
... else:
... return starmap(func, repeat(args, times))
>>> def pairwise(iterable):
... "s -> (s0,s1), (s1,s2), (s2, s3), ..."
... a, b = tee(iterable)
... try:
... b.next()
... except StopIteration:
... pass
... return izip(a, b)
This is not part of the examples but it tests to make sure the definitions
perform as purported.
>>> take(10, count())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> list(enumerate('abc'))
[(0, 'a'), (1, 'b'), (2, 'c')]
>>> list(islice(tabulate(lambda x: 2*x), 4))
[0, 2, 4, 6]
>>> nth('abcde', 3)
['d']
>>> all([2, 4, 6, 8], lambda x: x%2==0)
True
>>> all([2, 3, 6, 8], lambda x: x%2==0)
False
>>> any([2, 4, 6, 8], lambda x: x%2==0)
True
>>> any([1, 3, 5, 9], lambda x: x%2==0,)
False
>>> no([1, 3, 5, 9], lambda x: x%2==0)
True
>>> no([1, 2, 5, 9], lambda x: x%2==0)
False
>>> quantify(xrange(99), lambda x: x%2==0)
50
>>> a = [[1, 2, 3], [4, 5, 6]]
>>> flatten(a)
[1, 2, 3, 4, 5, 6]
>>> list(repeatfunc(pow, 5, 2, 3))
[8, 8, 8, 8, 8]
>>> import random
>>> take(5, imap(int, repeatfunc(random.random)))
[0, 0, 0, 0, 0]
>>> list(pairwise('abcd'))
[('a', 'b'), ('b', 'c'), ('c', 'd')]
>>> list(pairwise([]))
[]
>>> list(pairwise('a'))
[]
>>> list(islice(padnone('abc'), 0, 6))
['a', 'b', 'c', None, None, None]
>>> list(ncycles('abc', 3))
['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
>>> dotproduct([1,2,3], [4,5,6])
32
"""
__test__ = {'libreftest' : libreftest}
def test_main(verbose=None):
test_classes = (TestBasicOps, TestVariousIteratorArgs, TestGC,
RegressionTests, LengthTransparency)
test_support.run_unittest(*test_classes)
# verify reference counting
if verbose and hasattr(sys, "gettotalrefcount"):
import gc
counts = [None] * 5
for i in xrange(len(counts)):
test_support.run_unittest(*test_classes)
gc.collect()
counts[i] = sys.gettotalrefcount()
print counts
# doctest the examples in the library reference
test_support.run_doctest(sys.modules[__name__], verbose)
if __name__ == "__main__":
test_main(verbose=True)
| gpl-3.0 |
abhikumar22/MYBLOG | blg/Lib/site-packages/django-1.11.7-py3.6.egg/django/core/files/storage.py | 51 | 18802 | import errno
import os
import warnings
from datetime import datetime
from django.conf import settings
from django.core.exceptions import SuspiciousFileOperation
from django.core.files import File, locks
from django.core.files.move import file_move_safe
from django.core.signals import setting_changed
from django.utils import timezone
from django.utils._os import abspathu, safe_join
from django.utils.crypto import get_random_string
from django.utils.deconstruct import deconstructible
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.encoding import filepath_to_uri, force_text
from django.utils.functional import LazyObject, cached_property
from django.utils.module_loading import import_string
from django.utils.six.moves.urllib.parse import urljoin
from django.utils.text import get_valid_filename
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb'):
"""
Retrieves the specified file from storage.
"""
return self._open(name, mode)
def save(self, name, content, max_length=None):
"""
Saves new content to the file specified by name. The content should be
a proper File object or any python file-like object, ready to be read
from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
if not hasattr(content, 'chunks'):
content = File(content, name)
name = self.get_available_name(name, max_length=max_length)
return self._save(name, content)
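    # Illustrative usage sketch (not part of Django itself; the path and
    # payload below are hypothetical):
    #
    #     from django.core.files.base import ContentFile
    #     from django.core.files.storage import default_storage
    #
    #     saved_name = default_storage.save('uploads/note.txt',
    #                                        ContentFile(b'hello'))
    #     # saved_name may differ from the requested name if that name is
    #     # already taken (see get_available_name() below).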
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name, max_length=None):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a random 7
# character alphanumeric string (before the file extension, if one
# exists) to the filename until the generated filename doesn't exist.
# Truncate original name if required, so the new filename does not
# exceed the max_length.
while self.exists(name) or (max_length and len(name) > max_length):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
if max_length is None:
continue
# Truncate file_root if max_length exceeded.
truncation = len(name) - max_length
if truncation > 0:
file_root = file_root[:-truncation]
                # Entire file_root was truncated in an attempt to find an available filename.
if not file_root:
raise SuspiciousFileOperation(
'Storage can not find an available filename for "%s". '
'Please make sure that the corresponding file field '
'allows sufficient "max_length".' % name
)
name = os.path.join(dir_name, "%s_%s%s" % (file_root, get_random_string(7), file_ext))
return name
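    # Sketch of the collision-avoidance naming above (file names and the
    # random 7-character suffix are hypothetical):
    #
    #     storage.get_available_name('photo.jpg')
    #     # -> 'photo.jpg' if free, otherwise something like
    #     # -> 'photo_kx3f9q2.jpg'; with max_length set, file_root is
    #     #    truncated until the generated name fits.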
def generate_filename(self, filename):
"""
Validate the filename by calling get_valid_name() and return a filename
to be passed to the save() method.
"""
# `filename` may include a path as returned by FileField.upload_to.
dirname, filename = os.path.split(filename)
return os.path.normpath(os.path.join(dirname, self.get_valid_name(filename)))
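    # For example (sketch, not authoritative): since get_valid_name() relies
    # on get_valid_filename(), a hypothetical call such as
    #     storage.generate_filename('avatars/new photo.jpg')
    # would typically return 'avatars/new_photo.jpg' -- the directory part is
    # preserved and only the filename itself is sanitised.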
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError('subclasses of Storage must provide a delete() method')
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError('subclasses of Storage must provide an exists() method')
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError('subclasses of Storage must provide a listdir() method')
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError('subclasses of Storage must provide a size() method')
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError('subclasses of Storage must provide a url() method')
def accessed_time(self, name):
"""
Returns the last accessed time (as datetime object) of the file
specified by name. Deprecated: use get_accessed_time() instead.
"""
warnings.warn(
'Storage.accessed_time() is deprecated in favor of get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide an accessed_time() method')
def created_time(self, name):
"""
Returns the creation time (as datetime object) of the file
specified by name. Deprecated: use get_created_time() instead.
"""
warnings.warn(
'Storage.created_time() is deprecated in favor of get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a created_time() method')
def modified_time(self, name):
"""
Returns the last modified time (as datetime object) of the file
specified by name. Deprecated: use get_modified_time() instead.
"""
warnings.warn(
'Storage.modified_time() is deprecated in favor of get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
raise NotImplementedError('subclasses of Storage must provide a modified_time() method')
def get_accessed_time(self, name):
"""
Return the last accessed time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_accessed_time() method')
warnings.warn(
'Storage.accessed_time() is deprecated. '
'Storage backends should implement get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.accessed_time(name)
return _possibly_make_aware(dt)
def get_created_time(self, name):
"""
Return the creation time (as a datetime) of the file specified by name.
The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_created_time() method')
warnings.warn(
'Storage.created_time() is deprecated. '
'Storage backends should implement get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.created_time(name)
return _possibly_make_aware(dt)
def get_modified_time(self, name):
"""
Return the last modified time (as a datetime) of the file specified by
name. The datetime will be timezone-aware if USE_TZ=True.
"""
# At the end of the deprecation:
# raise NotImplementedError('subclasses of Storage must provide a get_modified_time() method')
warnings.warn(
'Storage.modified_time() is deprecated. '
'Storage backends should implement get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
dt = self.modified_time(name)
return _possibly_make_aware(dt)
def _possibly_make_aware(dt):
"""
    Convert a datetime object in the local timezone to an aware
    datetime in UTC, if USE_TZ is True.
"""
# This function is only needed to help with the deprecations above and can
# be removed in Django 2.0, RemovedInDjango20Warning.
if settings.USE_TZ:
tz = timezone.get_default_timezone()
return timezone.make_aware(dt, tz).astimezone(timezone.utc)
else:
return dt
@deconstructible
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None, file_permissions_mode=None,
directory_permissions_mode=None):
self._location = location
self._base_url = base_url
self._file_permissions_mode = file_permissions_mode
self._directory_permissions_mode = directory_permissions_mode
setting_changed.connect(self._clear_cached_properties)
def _clear_cached_properties(self, setting, **kwargs):
"""Reset setting based property values."""
if setting == 'MEDIA_ROOT':
self.__dict__.pop('base_location', None)
self.__dict__.pop('location', None)
elif setting == 'MEDIA_URL':
self.__dict__.pop('base_url', None)
elif setting == 'FILE_UPLOAD_PERMISSIONS':
self.__dict__.pop('file_permissions_mode', None)
elif setting == 'FILE_UPLOAD_DIRECTORY_PERMISSIONS':
self.__dict__.pop('directory_permissions_mode', None)
def _value_or_setting(self, value, setting):
return setting if value is None else value
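    # e.g. (sketch): _value_or_setting(None, settings.MEDIA_ROOT) falls back
    # to the setting, whereas _value_or_setting('/srv/media', settings.MEDIA_ROOT)
    # keeps the explicitly supplied value.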
@cached_property
def base_location(self):
return self._value_or_setting(self._location, settings.MEDIA_ROOT)
@cached_property
def location(self):
return abspathu(self.base_location)
@cached_property
def base_url(self):
if self._base_url is not None and not self._base_url.endswith('/'):
self._base_url += '/'
return self._value_or_setting(self._base_url, settings.MEDIA_URL)
@cached_property
def file_permissions_mode(self):
return self._value_or_setting(self._file_permissions_mode, settings.FILE_UPLOAD_PERMISSIONS)
@cached_property
def directory_permissions_mode(self):
return self._value_or_setting(self._directory_permissions_mode, settings.FILE_UPLOAD_DIRECTORY_PERMISSIONS)
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
# Create any intermediate directories that do not exist.
# Note that there is a race between os.path.exists and os.makedirs:
# if os.makedirs fails with EEXIST, the directory was created
# concurrently, and we can continue normally. Refs #16082.
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
try:
if self.directory_permissions_mode is not None:
# os.makedirs applies the global umask, so we reset it,
# for consistency with file_permissions_mode behavior.
old_umask = os.umask(0)
try:
os.makedirs(directory, self.directory_permissions_mode)
finally:
os.umask(old_umask)
else:
os.makedirs(directory)
except OSError as e:
if e.errno != errno.EEXIST:
raise
if not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
flags = (os.O_WRONLY | os.O_CREAT | os.O_EXCL |
getattr(os, 'O_BINARY', 0))
# The current umask value is masked out by os.open!
fd = os.open(full_path, flags, 0o666)
_file = None
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
if _file is None:
mode = 'wb' if isinstance(chunk, bytes) else 'wt'
_file = os.fdopen(fd, mode)
_file.write(chunk)
finally:
locks.unlock(fd)
if _file is not None:
_file.close()
else:
os.close(fd)
except OSError as e:
if e.errno == errno.EEXIST:
                    # Oops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if self.file_permissions_mode is not None:
os.chmod(full_path, self.file_permissions_mode)
# Store filenames with forward slashes, even on Windows.
return force_text(name.replace('\\', '/'))
def delete(self, name):
assert name, "The name argument is not allowed to be empty."
name = self.path(name)
# If the file exists, delete it from the filesystem.
# If os.remove() fails with ENOENT, the file may have been removed
# concurrently, and it's safe to continue normally.
try:
os.remove(name)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
return safe_join(self.location, name)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
url = filepath_to_uri(name)
if url is not None:
url = url.lstrip('/')
return urljoin(self.base_url, url)
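    # Sketch of the URL construction above, assuming MEDIA_URL = '/media/':
    #
    #     storage.url('photos/cat.jpg')   # -> '/media/photos/cat.jpg'
    #
    # The name is percent-encoded by filepath_to_uri() before being joined
    # onto base_url.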
def accessed_time(self, name):
warnings.warn(
'FileSystemStorage.accessed_time() is deprecated in favor of '
'get_accessed_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getatime(self.path(name)))
def created_time(self, name):
warnings.warn(
'FileSystemStorage.created_time() is deprecated in favor of '
'get_created_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getctime(self.path(name)))
def modified_time(self, name):
warnings.warn(
'FileSystemStorage.modified_time() is deprecated in favor of '
'get_modified_time().',
RemovedInDjango20Warning,
stacklevel=2,
)
return datetime.fromtimestamp(os.path.getmtime(self.path(name)))
def _datetime_from_timestamp(self, ts):
"""
If timezone support is enabled, make an aware datetime object in UTC;
otherwise make a naive one in the local timezone.
"""
if settings.USE_TZ:
# Safe to use .replace() because UTC doesn't have DST
return datetime.utcfromtimestamp(ts).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ts)
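    # e.g. (sketch): with USE_TZ = True, _datetime_from_timestamp(0) yields
    # datetime(1970, 1, 1, tzinfo=utc); with USE_TZ = False it yields a naive
    # datetime in the server's local timezone.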
def get_accessed_time(self, name):
return self._datetime_from_timestamp(os.path.getatime(self.path(name)))
def get_created_time(self, name):
return self._datetime_from_timestamp(os.path.getctime(self.path(name)))
def get_modified_time(self, name):
return self._datetime_from_timestamp(os.path.getmtime(self.path(name)))
def get_storage_class(import_path=None):
return import_string(import_path or settings.DEFAULT_FILE_STORAGE)
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
| gpl-3.0 |
Hybrid-Cloud/cinder | cinder/volume/drivers/emc/emc_vmax_masking.py | 1 | 124507 | # Copyright (c) 2012 - 2015 EMC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_concurrency import lockutils
from oslo_log import log as logging
import six
from cinder import exception
from cinder.i18n import _, _LE, _LI, _LW
from cinder.volume.drivers.emc import emc_vmax_fast
from cinder.volume.drivers.emc import emc_vmax_provision
from cinder.volume.drivers.emc import emc_vmax_provision_v3
from cinder.volume.drivers.emc import emc_vmax_utils
LOG = logging.getLogger(__name__)
STORAGEGROUPTYPE = 4
POSTGROUPTYPE = 3
INITIATORGROUPTYPE = 2
ISCSI = 'iscsi'
FC = 'fc'
EMC_ROOT = 'root/emc'
FASTPOLICY = 'storagetype:fastpolicy'
ISV3 = 'isV3'
class EMCVMAXMasking(object):
"""Masking class for SMI-S based EMC volume drivers.
    Masking code to dynamically create a masking view.
This masking class is for EMC volume drivers based on SMI-S.
It supports VMAX arrays.
"""
def __init__(self, prtcl):
self.protocol = prtcl
self.utils = emc_vmax_utils.EMCVMAXUtils(prtcl)
self.fast = emc_vmax_fast.EMCVMAXFast(prtcl)
self.provision = emc_vmax_provision.EMCVMAXProvision(prtcl)
self.provisionv3 = emc_vmax_provision_v3.EMCVMAXProvisionV3(prtcl)
def setup_masking_view(self, conn, maskingViewDict, extraSpecs):
@lockutils.synchronized(maskingViewDict['maskingViewName'],
"emc-mv-", True)
def do_get_or_create_masking_view_and_map_lun():
return self.get_or_create_masking_view_and_map_lun(conn,
maskingViewDict,
extraSpecs)
return do_get_or_create_masking_view_and_map_lun()
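    # Note on the pattern above (informal): lockutils.synchronized decorates
    # an inner function so the lock name can include the masking view name at
    # call time. A hypothetical view "OS-myhost-MV" is serialised under the
    # external lock "emc-mv-OS-myhost-MV", while attaches to other masking
    # views can proceed in parallel.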
def get_or_create_masking_view_and_map_lun(self, conn, maskingViewDict,
extraSpecs):
"""Get or Create a masking view and add a volume to the storage group.
Given a masking view tuple either get or create a masking view and add
the volume to the associated storage group.
If it is a live migration operation then we do not need to remove
the volume from any storage group (default or otherwise).
:param conn: the connection to ecom
:param maskingViewDict: the masking view dict
:param extraSpecs: additional info
:returns: dict -- rollbackDict
:raises: VolumeBackendAPIException
"""
rollbackDict = {}
controllerConfigService = maskingViewDict['controllerConfigService']
volumeInstance = maskingViewDict['volumeInstance']
maskingViewName = maskingViewDict['maskingViewName']
volumeName = maskingViewDict['volumeName']
isV3 = maskingViewDict['isV3']
isLiveMigration = maskingViewDict['isLiveMigration']
maskingViewDict['extraSpecs'] = extraSpecs
defaultStorageGroupInstanceName = None
fastPolicyName = None
storageGroupInstanceName = None
if isLiveMigration is False:
if isV3:
defaultStorageGroupInstanceName = (
self._get_v3_default_storagegroup_instancename(
conn, volumeInstance, maskingViewDict,
controllerConfigService, volumeName))
else:
fastPolicyName = maskingViewDict['fastPolicy']
# If FAST is enabled remove the volume from the default SG.
if fastPolicyName is not None:
defaultStorageGroupInstanceName = (
self._get_and_remove_from_storage_group_v2(
conn, controllerConfigService,
volumeInstance.path,
volumeName, fastPolicyName,
extraSpecs))
else:
# Live Migration
self.remove_and_reset_members(
conn, controllerConfigService, volumeInstance, volumeName,
extraSpecs, maskingViewDict['connector'], False)
        # If anything has gone wrong with the masking view, we roll back.
try:
maskingViewInstanceName, storageGroupInstanceName, errorMessage = (
self._validate_masking_view(conn, maskingViewDict,
defaultStorageGroupInstanceName,
extraSpecs))
LOG.debug(
"The masking view in the attach operation is "
"%(maskingViewInstanceName)s. The storage group "
"in the masking view is %(storageGroupInstanceName)s.",
{'maskingViewInstanceName': maskingViewInstanceName,
'storageGroupInstanceName': storageGroupInstanceName})
except Exception as e:
LOG.exception(_LE(
"Masking View creation or retrieval was not successful "
"for masking view %(maskingViewName)s. "
"Attempting rollback."),
{'maskingViewName': maskingViewDict['maskingViewName']})
errorMessage = e
rollbackDict['pgGroupName'], errorMessage = (
self._get_port_group_name_from_mv(
conn, maskingViewDict['maskingViewName'],
maskingViewDict['storageSystemName']))
if not errorMessage:
# Only after the masking view has been validated, add the
# volume to the storage group and recheck that it has been
# successfully added.
errorMessage = self._check_adding_volume_to_storage_group(
conn, maskingViewDict, storageGroupInstanceName)
rollbackDict['controllerConfigService'] = controllerConfigService
rollbackDict['defaultStorageGroupInstanceName'] = (
defaultStorageGroupInstanceName)
rollbackDict['volumeInstance'] = volumeInstance
rollbackDict['volumeName'] = volumeName
rollbackDict['fastPolicyName'] = fastPolicyName
rollbackDict['isV3'] = isV3
rollbackDict['extraSpecs'] = extraSpecs
rollbackDict['sgGroupName'] = maskingViewDict['sgGroupName']
rollbackDict['igGroupName'] = maskingViewDict['igGroupName']
rollbackDict['connector'] = maskingViewDict['connector']
if errorMessage:
            # Rollback code: if we cannot complete any of the steps above
            # successfully, we must roll back by adding the volume back to
            # the default storage group for that fast policy.
if (fastPolicyName is not None):
                # If the errorMessage was returned before the volume
                # was removed from the default storage group, no action
                # is needed.
self._check_if_rollback_action_for_masking_required(
conn, rollbackDict)
if isV3:
if maskingViewDict['slo'] is not None:
rollbackDict['storageSystemName'] = (
maskingViewDict['storageSystemName'])
rollbackDict['slo'] = maskingViewDict['slo']
self._check_if_rollback_action_for_masking_required(
conn, rollbackDict)
else:
errorMessage = self._check_adding_volume_to_storage_group(
conn, rollbackDict,
rollbackDict['defaultStorageGroupInstanceName'])
if errorMessage:
LOG.error(errorMessage)
exceptionMessage = (_(
"Failed to get, create or add volume %(volumeName)s "
"to masking view %(maskingViewName)s. "
"The error message received was %(errorMessage)s.")
% {'maskingViewName': maskingViewName,
'volumeName': volumeName,
'errorMessage': errorMessage})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(data=exceptionMessage)
return rollbackDict
def _get_v3_default_storagegroup_instancename(self, conn, volumeinstance,
maskingviewdict,
controllerConfigService,
volumeName):
defaultStorageGroupInstanceName = None
defaultSgGroupName = self.utils.get_v3_storage_group_name(
maskingviewdict['pool'],
maskingviewdict['slo'],
maskingviewdict['workload'])
assocStorageGroupInstanceNames = (
self.utils.get_storage_groups_from_volume(
conn, volumeinstance.path))
for assocStorageGroupInstanceName in (
assocStorageGroupInstanceNames):
instance = conn.GetInstance(
assocStorageGroupInstanceName, LocalOnly=False)
assocStorageGroupName = instance['ElementName']
if assocStorageGroupName == defaultSgGroupName:
defaultStorageGroupInstanceName = (
assocStorageGroupInstanceName)
break
if defaultStorageGroupInstanceName:
self._get_and_remove_from_storage_group_v3(
conn, controllerConfigService, volumeinstance.path,
volumeName, maskingviewdict,
defaultStorageGroupInstanceName)
else:
LOG.warning(_LW(
"Volume: %(volumeName)s does not belong "
"to storage group %(defaultSgGroupName)s."),
{'volumeName': volumeName,
'defaultSgGroupName': defaultSgGroupName})
return defaultStorageGroupInstanceName
def _validate_masking_view(self, conn, maskingViewDict,
defaultStorageGroupInstanceName,
extraSpecs):
"""Validate all the individual pieces of the masking view.
:param conn: the ecom connection
:param maskingViewDict: the masking view dictionary
:param defaultStorageGroupInstanceName: the default SG
:param extraSpecs: extra specifications
:returns: maskingViewInstanceName
:returns: storageGroupInstanceName,
:returns: string -- errorMessage
"""
storageSystemName = maskingViewDict['storageSystemName']
maskingViewName = maskingViewDict['maskingViewName']
maskingViewInstanceName = self._find_masking_view(
conn, maskingViewName, storageSystemName)
if maskingViewInstanceName is None:
maskingViewInstanceName, storageGroupInstanceName, errorMessage = (
self._validate_new_masking_view(
conn, maskingViewDict, defaultStorageGroupInstanceName,
extraSpecs))
else:
storageGroupInstanceName, errorMessage = (
self._validate_existing_masking_view(
conn, maskingViewDict, maskingViewInstanceName,
extraSpecs))
return maskingViewInstanceName, storageGroupInstanceName, errorMessage
def _validate_new_masking_view(self, conn, maskingViewDict,
defaultStorageGroupInstanceName,
extraSpecs):
"""Validate the creation of a new masking view.
:param conn: the ecom connection
:param maskingViewDict: the masking view dictionary
:param defaultStorageGroupInstanceName: the default SG
:param extraSpecs: extra specifications
:returns: maskingViewInstanceName
:returns: storageGroupInstanceName,
:returns: string -- errorMessage
"""
controllerConfigService = maskingViewDict['controllerConfigService']
igGroupName = maskingViewDict['igGroupName']
connector = maskingViewDict['connector']
storageSystemName = maskingViewDict['storageSystemName']
maskingViewName = maskingViewDict['maskingViewName']
pgGroupName = maskingViewDict['pgGroupName']
LOG.info(_LI("Returning random Port Group: "
"%(portGroupName)s."),
{'portGroupName': pgGroupName})
storageGroupInstanceName, errorMessage = (
self._check_storage_group(
conn, maskingViewDict, defaultStorageGroupInstanceName))
if errorMessage:
return None, storageGroupInstanceName, errorMessage
portGroupInstanceName, errorMessage = (
self._check_port_group(conn, controllerConfigService,
pgGroupName))
if errorMessage:
return None, storageGroupInstanceName, errorMessage
initiatorGroupInstanceName, errorMessage = (
self._check_initiator_group(conn, controllerConfigService,
igGroupName, connector,
storageSystemName, extraSpecs))
if errorMessage:
return None, storageGroupInstanceName, errorMessage
# Only after the components of the MV have been validated,
# add the volume to the storage group and recheck that it
# has been successfully added. This is necessary before
# creating a new masking view.
errorMessage = self._check_adding_volume_to_storage_group(
conn, maskingViewDict, storageGroupInstanceName)
if errorMessage:
return None, storageGroupInstanceName, errorMessage
maskingViewInstanceName, errorMessage = (
self._check_masking_view(
conn, controllerConfigService,
maskingViewName, storageGroupInstanceName,
portGroupInstanceName, initiatorGroupInstanceName,
extraSpecs))
return maskingViewInstanceName, storageGroupInstanceName, errorMessage
def _validate_existing_masking_view(self,
conn, maskingViewDict,
maskingViewInstanceName, extraSpecs):
"""Validate the components of an existing masking view.
:param conn: the ecom connection
:param maskingViewDict: the masking view dictionary
:param maskingViewInstanceName: the masking view instance name
:param extraSpecs: extra specification
:returns: storageGroupInstanceName
:returns: string -- errorMessage
"""
storageGroupInstanceName = None
controllerConfigService = maskingViewDict['controllerConfigService']
sgGroupName = maskingViewDict['sgGroupName']
igGroupName = maskingViewDict['igGroupName']
connector = maskingViewDict['connector']
storageSystemName = maskingViewDict['storageSystemName']
maskingViewName = maskingViewDict['maskingViewName']
# First verify that the initiator group matches the initiators.
errorMessage = self._check_existing_initiator_group(
conn, controllerConfigService, maskingViewName,
connector, storageSystemName, igGroupName, extraSpecs)
if errorMessage:
return storageGroupInstanceName, errorMessage
storageGroupInstanceName, errorMessage = (
self._check_existing_storage_group(
conn, controllerConfigService, sgGroupName,
maskingViewInstanceName))
return storageGroupInstanceName, errorMessage
def _check_storage_group(self, conn,
maskingViewDict, storageGroupInstanceName):
"""Get the storage group and return it.
:param conn: the ecom connection
:param maskingViewDict: the masking view dictionary
:param storageGroupInstanceName: default storage group instance name
:returns: storageGroupInstanceName
:returns: string -- msg, the error message
"""
msg = None
storageGroupInstanceName = (
self._get_storage_group_instance_name(
conn, maskingViewDict, storageGroupInstanceName))
if storageGroupInstanceName is None:
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Cannot get or create a storage group: %(sgGroupName)s"
" for volume %(volumeName)s ") %
{'sgGroupName': maskingViewDict['sgGroupName'],
'volumeName': maskingViewDict['volumeName']})
LOG.error(msg)
return storageGroupInstanceName, msg
def _check_existing_storage_group(
self, conn, controllerConfigService,
sgGroupName, maskingViewInstanceName):
"""Check that we can get the existing storage group.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param sgGroupName: the storage group name
:param maskingViewInstanceName: the masking view instance name
:returns: storageGroupInstanceName
:returns: string -- msg, the error message
"""
msg = None
sgFromMvInstanceName = (
self._get_storage_group_from_masking_view_instance(
conn, maskingViewInstanceName))
if sgFromMvInstanceName is None:
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Cannot get storage group: %(sgGroupName)s "
"from masking view %(maskingViewInstanceName)s. ") %
{'sgGroupName': sgGroupName,
'maskingViewInstanceName': maskingViewInstanceName})
LOG.error(msg)
return sgFromMvInstanceName, msg
def _check_port_group(self, conn,
controllerConfigService, pgGroupName):
"""Check that you can either get or create a port group.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param pgGroupName: the port group Name
:returns: portGroupInstanceName
:returns: string -- msg, the error message
"""
msg = None
portGroupInstanceName = self._get_port_group_instance_name(
conn, controllerConfigService, pgGroupName)
if portGroupInstanceName is None:
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Cannot get port group: %(pgGroupName)s. ") %
{'pgGroupName': pgGroupName})
LOG.error(msg)
return portGroupInstanceName, msg
def _check_initiator_group(
self, conn, controllerConfigService, igGroupName,
connector, storageSystemName, extraSpecs):
"""Check that initiator group can be either retrieved or created.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param igGroupName: the initiator group Name
:param connector: the connector object
:param storageSystemName: the storage system name
:param extraSpecs: extra specifications
:returns: initiatorGroupInstanceName
:returns: string -- the error message
"""
msg = None
initiatorGroupInstanceName = (
self._get_initiator_group_instance_name(
conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs))
if initiatorGroupInstanceName is None:
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Cannot get or create initiator group: "
"%(igGroupName)s. ") %
{'igGroupName': igGroupName})
LOG.error(msg)
return initiatorGroupInstanceName, msg
def _check_existing_initiator_group(
self, conn, controllerConfigService, maskingViewName,
connector, storageSystemName, igGroupName, extraSpecs):
"""Check that existing initiator group in the masking view.
Check if the initiators in the initiator group match those in the
system.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param maskingViewName: the masking view name
:param connector: the connector object
:param storageSystemName: the storage system name
:param igGroupName: the initiator group name
:param extraSpecs: extra specification
:returns: string -- msg, the error message
"""
msg = None
if not self._verify_initiator_group_from_masking_view(
conn, controllerConfigService, maskingViewName,
connector, storageSystemName, igGroupName,
extraSpecs):
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Unable to verify initiator group: %(igGroupName)s "
"in masking view %(maskingViewName)s. ") %
{'igGroupName': igGroupName,
'maskingViewName': maskingViewName})
LOG.error(msg)
return msg
def _check_masking_view(
self, conn, controllerConfigService,
maskingViewName, storageGroupInstanceName,
portGroupInstanceName, initiatorGroupInstanceName, extraSpecs):
"""Check that masking view can be either got or created.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param maskingViewName: the masking view name
:param storageGroupInstanceName: storage group instance name
:param portGroupInstanceName: port group instance name
:param initiatorGroupInstanceName: the initiator group instance name
:param extraSpecs: extra specifications
:returns: maskingViewInstanceName
:returns: string -- msg, the error message
"""
msg = None
maskingViewInstanceName = (
self._get_masking_view_instance_name(
conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName, extraSpecs))
if maskingViewInstanceName is None:
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Cannot create masking view: %(maskingViewName)s. ") %
{'maskingViewName': maskingViewName})
LOG.error(msg)
return maskingViewInstanceName, msg
def _check_adding_volume_to_storage_group(
self, conn, maskingViewDict, storageGroupInstanceName):
"""Add the volume to the storage group and double check it is there.
:param conn: the ecom connection
:param maskingViewDict: the masking view dictionary
:param storageGroupInstanceName: storage group instance name
:returns: string -- the error message
"""
controllerConfigService = maskingViewDict['controllerConfigService']
sgGroupName = maskingViewDict['sgGroupName']
volumeInstance = maskingViewDict['volumeInstance']
volumeName = maskingViewDict['volumeName']
msg = None
if self._is_volume_in_storage_group(
conn, storageGroupInstanceName,
volumeInstance, sgGroupName):
LOG.warning(_LW(
"Volume: %(volumeName)s is already part "
"of storage group %(sgGroupName)s."),
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
else:
msg = self._add_volume_to_sg_and_verify(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName,
maskingViewDict['extraSpecs'])
return msg
def _add_volume_to_sg_and_verify(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName, extraSpecs):
"""Add the volume to the storage group and double check it is there.
:param conn: the ecom connection
:param controllerConfigService: controller service
:param storageGroupInstanceName: storage group instance name
:param volumeInstance: the volume instance
:param volumeName: the volume name
:param sgGroupName: the storage group name
:param extraSpecs: the extra specifications
:returns: string -- the error message
"""
msg = None
self.add_volume_to_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName, extraSpecs)
if not self._is_volume_in_storage_group(
conn, storageGroupInstanceName, volumeInstance, sgGroupName):
            # This may be used in an exception, hence _ instead of _LE.
msg = (_(
"Volume: %(volumeName)s was not added "
"to storage group %(sgGroupName)s.") %
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
LOG.error(msg)
else:
LOG.info(_LI("Successfully added %(volumeName)s to "
"%(sgGroupName)s."),
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
return msg
def _get_and_remove_from_storage_group_v2(
self, conn, controllerConfigService, volumeInstanceName,
volumeName, fastPolicyName, extraSpecs):
"""Get the storage group and remove volume from it.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param volumeInstanceName: volume instance name
:param volumeName: volume name
:param fastPolicyName: fast name
:param extraSpecs: additional info
:returns: defaultStorageGroupInstanceName
:raises: VolumeBackendAPIException
"""
defaultStorageGroupInstanceName = (
self.fast.get_and_verify_default_storage_group(
conn, controllerConfigService, volumeInstanceName,
volumeName, fastPolicyName))
if defaultStorageGroupInstanceName is None:
exceptionMessage = (_(
"Cannot get the default storage group for FAST policy: "
"%(fastPolicyName)s.")
% {'fastPolicyName': fastPolicyName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
retStorageGroupInstanceName = (
self.remove_device_from_default_storage_group(
conn, controllerConfigService, volumeInstanceName,
volumeName, fastPolicyName, extraSpecs))
if retStorageGroupInstanceName is None:
exceptionMessage = (_(
"Failed to remove volume %(volumeName)s from default SG.")
% {'volumeName': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
return defaultStorageGroupInstanceName
def _get_and_remove_from_storage_group_v3(
self, conn, controllerConfigService, volumeInstanceName,
volumeName, maskingViewDict, storageGroupInstanceName):
"""Get the storage group and remove volume from it.
:param conn: the ecom connection
:param controllerConfigService: controller configuration service
:param volumeInstanceName: volume instance name
:param volumeName: volume name
:param maskingViewDict: the masking view dictionary
:param storageGroupInstanceName: storage group instance name
:raises: VolumeBackendAPIException
"""
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
LOG.debug(
"There are %(length)lu associated with the default storage group "
"before removing volume %(volumeName)s.",
{'length': len(assocVolumeInstanceNames),
'volumeName': volumeName})
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstanceName, volumeName, maskingViewDict['extraSpecs'])
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
LOG.debug(
"There are %(length)lu associated with the default storage group "
"after removing volume %(volumeName)s.",
{'length': len(assocVolumeInstanceNames),
'volumeName': volumeName})
# Required for unit tests.
emptyStorageGroupInstanceName = (
self._wrap_get_storage_group_from_volume(
conn, volumeInstanceName, maskingViewDict['sgGroupName']))
if emptyStorageGroupInstanceName is not None:
exceptionMessage = (_(
"Failed to remove volume %(volumeName)s from default SG: "
"%(volumeName)s.")
% {'volumeName': volumeName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
def _is_volume_in_storage_group(
self, conn, storageGroupInstanceName, volumeInstance, sgName):
"""Check if the volume is already part of the storage group.
Check if the volume is already part of the storage group,
if it is no need to re-add it.
:param conn: the connection to ecom
:param storageGroupInstanceName: the storage group instance name
:param volumeInstance: the volume instance
:param sgName: the storage group name
:returns: boolean
"""
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
conn, volumeInstance.path, sgName))
if foundStorageGroupInstanceName is not None:
storageGroupInstance = conn.GetInstance(
storageGroupInstanceName, LocalOnly=False)
LOG.debug(
"The existing storage group instance element name is: "
"%(existingElement)s.",
{'existingElement': storageGroupInstance['ElementName']})
foundStorageGroupInstance = conn.GetInstance(
foundStorageGroupInstanceName, LocalOnly=False)
LOG.debug(
"The found storage group instance element name is: "
"%(foundElement)s.",
{'foundElement': foundStorageGroupInstance['ElementName']})
if (foundStorageGroupInstance['ElementName'] == (
storageGroupInstance['ElementName'])):
return True
return False
def _find_masking_view(self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the masking view instance.
:param conn: connection to the ecom server
:param maskingViewName: the masking view name
:param storageSystemName: the storage system name(String)
:returns: dict -- foundMaskingViewInstanceName
"""
foundMaskingViewInstanceName = None
storageSystemInstanceName = self.utils.find_storageSystem(
conn, storageSystemName)
maskingViewInstances = conn.Associators(
storageSystemInstanceName,
ResultClass='EMC_LunMaskingSCSIProtocolController')
for maskingViewInstance in maskingViewInstances:
if maskingViewName == maskingViewInstance['ElementName']:
foundMaskingViewInstanceName = maskingViewInstance.path
break
if foundMaskingViewInstanceName is not None:
            # Now check that it has not been deleted.
instance = self.utils.get_existing_instance(
conn, foundMaskingViewInstanceName)
if instance is None:
foundMaskingViewInstanceName = None
LOG.error(_LE(
"Looks like masking view: %(maskingViewName)s "
"has recently been deleted."),
{'maskingViewName': maskingViewName})
else:
LOG.debug(
"Found existing masking view: %(maskingViewName)s.",
{'maskingViewName': maskingViewName})
return foundMaskingViewInstanceName
def _create_storage_group(
self, conn, maskingViewDict, defaultStorageGroupInstanceName):
"""Create a new storage group that doesn't already exist.
        If fastPolicyName is not None we attempt to remove it from the
        default storage group of that policy and associate it with the new
        storage group that will be part of the masking view.
        Exceptions are not handled in this method; they are handled
        up the stack.
:param conn: connection to the ecom server
:param maskingViewDict: the masking view dictionary
:param defaultStorageGroupInstanceName: the default storage group
instance name (Can be None)
:returns: foundStorageGroupInstanceName the instance Name of the
storage group
"""
failedRet = None
controllerConfigService = maskingViewDict['controllerConfigService']
storageGroupName = maskingViewDict['sgGroupName']
isV3 = maskingViewDict['isV3']
if isV3:
workload = maskingViewDict['workload']
pool = maskingViewDict['pool']
slo = maskingViewDict['slo']
foundStorageGroupInstanceName = (
self.provisionv3.create_storage_group_v3(
conn, controllerConfigService, storageGroupName,
pool, slo, workload, maskingViewDict['extraSpecs']))
else:
fastPolicyName = maskingViewDict['fastPolicy']
volumeInstance = maskingViewDict['volumeInstance']
foundStorageGroupInstanceName = (
self.provision.create_and_get_storage_group(
conn, controllerConfigService, storageGroupName,
volumeInstance.path, maskingViewDict['extraSpecs']))
if (fastPolicyName is not None and
defaultStorageGroupInstanceName is not None):
assocTierPolicyInstanceName = (
self.fast.add_storage_group_and_verify_tier_policy_assoc(
conn, controllerConfigService,
foundStorageGroupInstanceName,
storageGroupName, fastPolicyName,
maskingViewDict['extraSpecs']))
if assocTierPolicyInstanceName is None:
LOG.error(_LE(
"Cannot add and verify tier policy association for "
"storage group : %(storageGroupName)s to "
"FAST policy : %(fastPolicyName)s."),
{'storageGroupName': storageGroupName,
'fastPolicyName': fastPolicyName})
return failedRet
if foundStorageGroupInstanceName is None:
LOG.error(_LE(
"Cannot get storage Group from job : %(storageGroupName)s."),
{'storageGroupName': storageGroupName})
return failedRet
else:
LOG.info(_LI(
"Created new storage group: %(storageGroupName)s."),
{'storageGroupName': storageGroupName})
return foundStorageGroupInstanceName
def find_port_group(self, conn, controllerConfigService, portGroupName):
"""Given the port Group name get the port group instance name.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param portGroupName: the name of the port group you are getting
:returns: foundPortGroupInstanceName
"""
foundPortGroupInstanceName = None
portMaskingGroupInstances = conn.Associators(
controllerConfigService, ResultClass='CIM_TargetMaskingGroup')
for portMaskingGroupInstance in portMaskingGroupInstances:
if portGroupName == portMaskingGroupInstance['ElementName']:
# Check to see if it has been recently deleted.
instance = self.utils.get_existing_instance(
conn, portMaskingGroupInstance.path)
if instance is None:
foundPortGroupInstanceName = None
else:
foundPortGroupInstanceName = instance.path
break
if foundPortGroupInstanceName is None:
LOG.error(_LE(
"Could not find port group : %(portGroupName)s. Check that "
"the EMC configuration file has the correct port group name."),
{'portGroupName': portGroupName})
return foundPortGroupInstanceName
def _create_or_get_initiator_group(
self, conn, controllerConfigService, igGroupName,
connector, storageSystemName, extraSpecs):
"""Attempt to create an initiatorGroup.
If one already exists with the same Initiator/wwns then get it.
Check to see if an initiatorGroup already exists, that matches the
connector information.
NOTE: An initiator/wwn can only belong to one initiatorGroup.
        If we were to attempt to create one with an initiator/wwn that
        already belongs to another initiatorGroup, it would fail.
:param conn: connection to the ecom server
:param controllerConfigService: the controller config Servicer
:param igGroupName: the proposed name of the initiator group
:param connector: the connector information to the host
:param storageSystemName: the storage system name (String)
:param extraSpecs: extra specifications
:returns: foundInitiatorGroupInstanceName
"""
initiatorNames = self._find_initiator_names(conn, connector)
LOG.debug("The initiator name(s) are: %(initiatorNames)s.",
{'initiatorNames': initiatorNames})
foundInitiatorGroupInstanceName = self._find_initiator_masking_group(
conn, controllerConfigService, initiatorNames)
# If you cannot find an initiatorGroup that matches the connector
# info create a new initiatorGroup.
if foundInitiatorGroupInstanceName is None:
# Check that our connector information matches the
# hardwareId(s) on the vmax.
storageHardwareIDInstanceNames = (
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.info(_LI(
"Initiator Name(s) %(initiatorNames)s are not on array "
"%(storageSystemName)s."),
{'initiatorNames': initiatorNames,
'storageSystemName': storageSystemName})
storageHardwareIDInstanceNames = (
self._create_hardware_ids(conn, initiatorNames,
storageSystemName))
if not storageHardwareIDInstanceNames:
msg = (_("Failed to create hardware id(s) on "
"%(storageSystemName)s.")
% {'storageSystemName': storageSystemName})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
foundInitiatorGroupInstanceName = self._create_initiator_Group(
conn, controllerConfigService, igGroupName,
storageHardwareIDInstanceNames, extraSpecs)
LOG.info(_LI(
"Created new initiator group name: %(igGroupName)s."),
{'igGroupName': igGroupName})
else:
initiatorGroupInstance = conn.GetInstance(
foundInitiatorGroupInstanceName, LocalOnly=False)
LOG.info(_LI(
"Using existing initiator group name: %(igGroupName)s."),
{'igGroupName': initiatorGroupInstance['ElementName']})
return foundInitiatorGroupInstanceName
def _find_initiator_names(self, conn, connector):
"""Check the connector object for initiators(ISCSI) or wwpns(FC).
:param conn: the connection to the ecom
:param connector: the connector object
:returns: list -- list of found initiator names
:raises: VolumeBackendAPIException
"""
foundinitiatornames = []
name = 'initiator name'
if (self.protocol.lower() == ISCSI and connector['initiator']):
foundinitiatornames.append(connector['initiator'])
elif self.protocol.lower() == FC:
if ('wwpns' in connector and connector['wwpns']):
for wwn in connector['wwpns']:
foundinitiatornames.append(wwn)
name = 'world wide port names'
else:
msg = (_("FC is the protocol but wwpns are "
"not supplied by OpenStack."))
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
if (foundinitiatornames is None or len(foundinitiatornames) == 0):
msg = (_("Error finding %(name)s.")
% {'name': name})
LOG.error(msg)
raise exception.VolumeBackendAPIException(data=msg)
LOG.debug("Found %(name)s: %(initiator)s.",
{'name': name,
'initiator': foundinitiatornames})
return foundinitiatornames
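    # Illustrative connector shapes handled above (values are hypothetical):
    #
    #     iSCSI: {'initiator': 'iqn.1993-08.org.debian:01:abcdef'}
    #     FC:    {'wwpns': ['10000090fa534cd0', '10000090fa534cd1']}
    #
    # iSCSI yields the single IQN; FC yields every WWPN in the list.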
def _find_initiator_masking_group(
self, conn, controllerConfigService, initiatorNames):
"""Check to see if an initiatorGroup already exists.
NOTE: An initiator/wwn can only belong to one initiatorGroup.
        If we were to attempt to create one with an initiator/wwn that
        already belongs to another initiatorGroup, it would fail.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration service
:param initiatorNames: the list of initiator names
:returns: foundInitiatorMaskingGroup
"""
foundInitiatorMaskingGroupInstanceName = None
initiatorMaskingGroupInstanceNames = (
conn.AssociatorNames(controllerConfigService,
ResultClass='CIM_InitiatorMaskingGroup'))
for initiatorMaskingGroupInstanceName in (
initiatorMaskingGroupInstanceNames):
# Check that it hasn't been deleted. If it has, break out
# of the for loop.
instance = self.utils.get_existing_instance(
conn, initiatorMaskingGroupInstanceName)
if instance is None:
# MaskingGroup doesn't exist any more.
break
storageHardwareIdInstances = (
conn.Associators(initiatorMaskingGroupInstanceName,
ResultClass='EMC_StorageHardwareID'))
for storageHardwareIdInstance in storageHardwareIdInstances:
# If EMC_StorageHardwareID matches the initiator,
# we found the existing CIM_InitiatorMaskingGroup.
hardwareid = storageHardwareIdInstance['StorageID']
for initiator in initiatorNames:
if six.text_type(hardwareid).lower() == (
six.text_type(initiator).lower()):
foundInitiatorMaskingGroupInstanceName = (
initiatorMaskingGroupInstanceName)
break
if foundInitiatorMaskingGroupInstanceName is not None:
break
if foundInitiatorMaskingGroupInstanceName is not None:
break
return foundInitiatorMaskingGroupInstanceName
def _get_storage_hardware_id_instance_names(
self, conn, initiatorNames, storageSystemName):
"""Given a list of initiator names find CIM_StorageHardwareID instance.
:param conn: the connection to the ecom server
:param initiatorNames: the list of initiator names
:param storageSystemName: the storage system name
:returns: list -- foundHardwardIDsInstanceNames
"""
foundHardwardIDsInstanceNames = []
hardwareIdManagementService = (
self.utils.find_storage_hardwareid_service(
conn, storageSystemName))
hardwareIdInstances = (
self.utils.get_hardware_id_instances_from_array(
conn, hardwareIdManagementService))
for hardwareIdInstance in hardwareIdInstances:
storageId = hardwareIdInstance['StorageID']
for initiatorName in initiatorNames:
if storageId.lower() == initiatorName.lower():
                    # Check whether the found hardwareId has been deleted.
# If it has, we don't want to add it to the list.
instance = self.utils.get_existing_instance(
conn, hardwareIdInstance.path)
if instance is None:
# HardwareId doesn't exist. Skip it.
break
foundHardwardIDsInstanceNames.append(
hardwareIdInstance.path)
break
LOG.debug(
"The found hardware IDs are : %(foundHardwardIDsInstanceNames)s.",
{'foundHardwardIDsInstanceNames': foundHardwardIDsInstanceNames})
return foundHardwardIDsInstanceNames
def _get_initiator_group_from_job(self, conn, job):
"""After creating an new initiator group find it and return it.
:param conn: the connection to the ecom server
:param job: the create initiator group job
:returns: dict -- initiatorDict
"""
associators = conn.Associators(
job['Job'],
ResultClass='CIM_InitiatorMaskingGroup')
volpath = associators[0].path
initiatorDict = {}
initiatorDict['classname'] = volpath.classname
keys = {}
keys['CreationClassName'] = volpath['CreationClassName']
keys['SystemName'] = volpath['SystemName']
keys['DeviceID'] = volpath['DeviceID']
keys['SystemCreationClassName'] = volpath['SystemCreationClassName']
initiatorDict['keybindings'] = keys
return initiatorDict
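    # Example of the dict shape assembled above (illustrative placeholder
    # values only; the classname and key values depend on the array and
    # SMI-S provider in use).
    #
    #   {'classname': 'SE_InitiatorMaskingGroup',
    #    'keybindings': {'CreationClassName': 'SE_InitiatorMaskingGroup',
    #                    'SystemName': 'SYMMETRIX+000195900000',
    #                    'DeviceID': '3E000000',
    #                    'SystemCreationClassName': 'Symm_StorageSystem'}}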
def _create_masking_view(
self, conn, configService, maskingViewName, deviceMaskingGroup,
targetMaskingGroup, initiatorMaskingGroup, extraSpecs):
"""After creating an new initiator group find it and return it.
:param conn: the connection to the ecom server
        :param configService: the controller configuration service
:param maskingViewName: the masking view name string
:param deviceMaskingGroup: device(storage) masking group (instanceName)
:param targetMaskingGroup: target(port) masking group (instanceName)
:param initiatorMaskingGroup: initiator masking group (instanceName)
:param extraSpecs: extra specifications
:returns: int -- return code
:returns: dict -- job
:raises: VolumeBackendAPIException
"""
rc, job = conn.InvokeMethod(
'CreateMaskingView', configService, ElementName=maskingViewName,
InitiatorMaskingGroup=initiatorMaskingGroup,
DeviceMaskingGroup=deviceMaskingGroup,
TargetMaskingGroup=targetMaskingGroup)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Masking View: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'groupName': maskingViewName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
LOG.info(_LI(
"Created new masking view : %(maskingViewName)s."),
{'maskingViewName': maskingViewName})
return rc, job
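    # Minimal sketch of the SMI-S job-handling pattern used above and in the
    # other InvokeMethod calls in this class (a paraphrase, not the driver's
    # exact code). A non-zero return code usually means the request was
    # accepted as an asynchronous job, so the job is polled before treating
    # the call as a failure:
    #
    #   rc, job = conn.InvokeMethod('CreateMaskingView', configService, ...)
    #   if rc != 0:
    #       # e.g. 4096 conventionally means "job started" in SMI-S
    #       rc, errordesc = self.utils.wait_for_job_complete(conn, job,
    #                                                        extraSpecs)
    #       if rc != 0:
    #           raise exception.VolumeBackendAPIException(data=errordesc)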
def find_new_masking_view(self, conn, jobDict):
"""Find the newly created volume.
:param conn: the connection to the ecom server
:param jobDict: the job dictionary
:returns: dict -- maskingViewInstance
"""
associators = conn.Associators(
jobDict['Job'],
ResultClass='Symm_LunMaskingView')
mvpath = associators[0].path
maskingViewInstance = {}
maskingViewInstance['classname'] = mvpath.classname
keys = {}
keys['CreationClassName'] = mvpath['CreationClassName']
keys['SystemName'] = mvpath['SystemName']
keys['DeviceID'] = mvpath['DeviceID']
keys['SystemCreationClassName'] = mvpath['SystemCreationClassName']
maskingViewInstance['keybindings'] = keys
return maskingViewInstance
def _get_storage_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Gets the Device Masking Group from masking view.
:param conn: the connection to the ecom server
:param maskingViewName: the masking view name (String)
:param storageSystemName: storage system name (String)
:returns: instance name foundStorageGroupInstanceName
"""
foundStorageGroupInstanceName = None
foundView = self._find_masking_view(
conn, maskingViewName, storageSystemName)
if foundView is not None:
foundStorageGroupInstanceName = (
self._get_storage_group_from_masking_view_instance(
conn, foundView))
LOG.debug(
"Masking view: %(view)s DeviceMaskingGroup: %(masking)s.",
{'view': maskingViewName,
'masking': foundStorageGroupInstanceName})
else:
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
return foundStorageGroupInstanceName
def _get_storage_group_from_masking_view_instance(
self, conn, maskingViewInstance):
"""Gets the Device Masking Group from masking view instance.
:param conn: the connection to the ecom server
:param maskingViewInstance: the masking view instance
:returns: instance name foundStorageGroupInstanceName
"""
foundStorageGroupInstanceName = None
groups = conn.AssociatorNames(
maskingViewInstance,
ResultClass='CIM_DeviceMaskingGroup')
if len(groups) > 0:
foundStorageGroupInstanceName = groups[0]
return foundStorageGroupInstanceName
def _get_storage_group_instance_name(
self, conn, maskingViewDict,
defaultStorageGroupInstanceName):
"""Gets the storage group instance name.
If fastPolicy name is None then NON FAST is assumed.
If it is a valid fastPolicy name then associate the new storage
group with the fast policy.
If we are using an existing storage group then we must check that
it is associated with the correct fast policy.
:param conn: the connection to the ecom server
:param maskingViewDict: the masking view dictionary
:param defaultStorageGroupInstanceName: default storage group instance
name (can be None for Non FAST)
:returns: instance name storageGroupInstanceName
:raises: VolumeBackendAPIException
"""
storageGroupInstanceName = self.utils.find_storage_masking_group(
conn, maskingViewDict['controllerConfigService'],
maskingViewDict['sgGroupName'])
if storageGroupInstanceName is None:
storageGroupInstanceName = self._create_storage_group(
conn, maskingViewDict,
defaultStorageGroupInstanceName)
if storageGroupInstanceName is None:
errorMessage = (_(
"Cannot create or find an storage group with name "
"%(sgGroupName)s.")
% {'sgGroupName': maskingViewDict['sgGroupName']})
LOG.error(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return storageGroupInstanceName
def _get_port_group_instance_name(
self, conn, controllerConfigService, pgGroupName):
"""Gets the port group instance name.
        The portGroup name has been defined in the EMC Config file; if it
        does not exist the operation should fail.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param pgGroupName: the port group name
:returns: instance name foundPortGroupInstanceName
"""
foundPortGroupInstanceName = self.find_port_group(
conn, controllerConfigService, pgGroupName)
if foundPortGroupInstanceName is None:
LOG.error(_LE(
"Cannot find a portGroup with name %(pgGroupName)s. "
"The port group for a masking view must be pre-defined."),
{'pgGroupName': pgGroupName})
return foundPortGroupInstanceName
LOG.info(_LI(
"Port group instance name is %(foundPortGroupInstanceName)s."),
{'foundPortGroupInstanceName': foundPortGroupInstanceName})
return foundPortGroupInstanceName
def _get_initiator_group_instance_name(
self, conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs):
"""Gets the initiator group instance name.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
        :param igGroupName: the initiator group name
:param connector: the connector object
:param storageSystemName: the storage system name
:param extraSpecs: extra specifications
:returns: foundInitiatorGroupInstanceName
"""
foundInitiatorGroupInstanceName = (self._create_or_get_initiator_group(
conn, controllerConfigService, igGroupName, connector,
storageSystemName, extraSpecs))
if foundInitiatorGroupInstanceName is None:
LOG.error(_LE(
"Cannot create or find an initiator group with "
"name %(igGroupName)s."),
{'igGroupName': igGroupName})
return foundInitiatorGroupInstanceName
def _get_masking_view_instance_name(
self, conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName, extraSpecs):
"""Gets the masking view instance name.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller configuration server
:param maskingViewName: the masking view name (String)
:param storageGroupInstanceName: the storage group instance name
:param portGroupInstanceName: the port group instance name
:param initiatorGroupInstanceName: the initiator group instance name
:param extraSpecs: extra specifications
:returns: instance name foundMaskingViewInstanceName
"""
_rc, job = (
self._create_masking_view(
conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
initiatorGroupInstanceName, extraSpecs))
foundMaskingViewInstanceName = self.find_new_masking_view(conn, job)
if foundMaskingViewInstanceName is None:
LOG.error(_LE(
"Cannot find the new masking view just created with name "
"%(maskingViewName)s."),
{'maskingViewName': maskingViewName})
return foundMaskingViewInstanceName
def _check_if_rollback_action_for_masking_required(
self, conn, rollbackDict):
"""This is a rollback action for FAST.
We need to be able to return the volume to the default storage group
if anything has gone wrong. The volume can also potentially belong to
a storage group that is not the default depending on where
the exception occurred. We also may need to clean up any unused
initiator groups.
:param conn: the connection to the ecom server
:param rollbackDict: the rollback dictionary
:returns: message
:raises: VolumeBackendAPIException
"""
message = None
# Check if ig has been created. If so, check for other
# masking views associated with the ig. If none, remove
# initiators and delete ig.
self._check_ig_rollback(
conn, rollbackDict['controllerConfigService'],
rollbackDict['igGroupName'], rollbackDict['connector'],
rollbackDict['extraSpecs'])
try:
foundStorageGroupInstanceName = (
self.utils.get_storage_group_from_volume(
conn, rollbackDict['volumeInstance'].path,
rollbackDict['sgGroupName']))
# Volume is not associated with any storage group so add
# it back to the default.
if not foundStorageGroupInstanceName:
if rollbackDict['isV3']:
errorMessage = self._check_adding_volume_to_storage_group(
conn, rollbackDict,
rollbackDict['defaultStorageGroupInstanceName'])
if errorMessage:
LOG.error(errorMessage)
message = (_("V3 rollback"))
else:
LOG.warning(_LW(
"No storage group found. "
"Performing rollback on Volume: %(volumeName)s "
"To return it to the default storage group for FAST "
"policy %(fastPolicyName)s."),
{'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
assocDefaultStorageGroupName = (
self.fast
.add_volume_to_default_storage_group_for_fast_policy(
conn,
rollbackDict['controllerConfigService'],
rollbackDict['volumeInstance'],
rollbackDict['volumeName'],
rollbackDict['fastPolicyName'],
rollbackDict['extraSpecs']))
if assocDefaultStorageGroupName is None:
LOG.error(_LE(
"Failed to Roll back to re-add volume "
"%(volumeName)s "
"to default storage group for fast policy "
"%(fastPolicyName)s: Please contact your sys "
"admin to get the volume re-added manually."),
{'volumeName': rollbackDict['volumeName'],
'fastPolicyName': rollbackDict['fastPolicyName']})
message = (_("V2 rollback, volume is not in any storage "
"group."))
else:
LOG.info(_LI(
"The storage group found is "
"%(foundStorageGroupInstanceName)s."),
{'foundStorageGroupInstanceName':
foundStorageGroupInstanceName})
# Check the name, see if it is the default storage group
# or another.
if (foundStorageGroupInstanceName !=
rollbackDict['defaultStorageGroupInstanceName']):
# Remove it from its current masking view and return it
# to its default masking view if fast is enabled or slo
# is defined.
self.remove_and_reset_members(
conn,
rollbackDict['controllerConfigService'],
rollbackDict['volumeInstance'],
rollbackDict['volumeName'],
rollbackDict['extraSpecs'])
message = (_("Rollback - Volume in another storage "
"group besides default storage group."))
except Exception:
errorMessage = (_(
"Rollback for Volume: %(volumeName)s has failed. "
"Please contact your system administrator to manually return "
"your volume to the default storage group for fast policy/ "
"slo.")
% {'volumeName': rollbackDict['volumeName']})
LOG.exception(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
return message
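    # Illustrative only: rollbackDict is assumed to be built by the caller
    # with at least the keys read in the method above, e.g.
    #
    #   rollbackDict = {
    #       'controllerConfigService': controllerConfigService,
    #       'defaultStorageGroupInstanceName': defaultSgInstanceName,
    #       'volumeInstance': volumeInstance,
    #       'volumeName': volumeName,
    #       'fastPolicyName': fastPolicyName,  # may be None
    #       'sgGroupName': sgGroupName,
    #       'igGroupName': igGroupName,
    #       'connector': connector,
    #       'isV3': isV3,
    #       'extraSpecs': extraSpecs,
    #   }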
def _find_new_initiator_group(self, conn, maskingGroupDict):
"""After creating an new initiator group find it and return it.
:param conn: connection to the ecom server
:param maskingGroupDict: the maskingGroupDict dict
:returns: instance name foundInitiatorGroupInstanceName
"""
foundInitiatorGroupInstanceName = None
if 'MaskingGroup' in maskingGroupDict:
foundInitiatorGroupInstanceName = maskingGroupDict['MaskingGroup']
return foundInitiatorGroupInstanceName
def _get_initiator_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the initiator group from it.
:param conn: connection to the ecom server
:param maskingViewName: the name of the masking view
:param storageSystemName: the storage system name
:returns: instance name foundInitiatorMaskingGroupInstanceName
"""
foundInitiatorMaskingGroupInstanceName = None
foundView = self._find_masking_view(
conn, maskingViewName, storageSystemName)
if foundView is not None:
groups = conn.AssociatorNames(
foundView,
ResultClass='CIM_InitiatorMaskingGroup')
if len(groups):
foundInitiatorMaskingGroupInstanceName = groups[0]
LOG.debug(
"Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.",
{'view': maskingViewName,
'masking': foundInitiatorMaskingGroupInstanceName})
else:
LOG.warning(_LW("Unable to find Masking view: %(view)s."),
{'view': maskingViewName})
return foundInitiatorMaskingGroupInstanceName
def _verify_initiator_group_from_masking_view(
self, conn, controllerConfigService, maskingViewName, connector,
storageSystemName, igGroupName, extraSpecs):
"""Check that the initiator group contains the correct initiators.
If using an existing masking view check that the initiator group
contains the correct initiators. If it does not contain the correct
initiators then we delete the initiator group from the masking view,
        re-create it with the correct initiators and add it to the masking view.
NOTE: EMC does not support ModifyMaskingView so we must first
delete the masking view and recreate it.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param maskingViewName: maskingview name (String)
:param connector: the connector dict
:param storageSystemName: the storage System Name (string)
:param igGroupName: the initiator group name (String)
:param extraSpecs: extra specifications
:returns: boolean
"""
initiatorNames = self._find_initiator_names(conn, connector)
foundInitiatorGroupFromConnector = self._find_initiator_masking_group(
conn, controllerConfigService, initiatorNames)
foundInitiatorGroupFromMaskingView = (
self._get_initiator_group_from_masking_view(
conn, maskingViewName, storageSystemName))
if (foundInitiatorGroupFromConnector !=
foundInitiatorGroupFromMaskingView):
if foundInitiatorGroupFromMaskingView is not None:
maskingViewInstanceName = self._find_masking_view(
conn, maskingViewName, storageSystemName)
if foundInitiatorGroupFromConnector is None:
storageHardwareIDInstanceNames = (
self._get_storage_hardware_id_instance_names(
conn, initiatorNames, storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.info(_LI(
"Initiator Name(s) %(initiatorNames)s are not on "
"array %(storageSystemName)s. "),
{'initiatorNames': initiatorNames,
'storageSystemName': storageSystemName})
storageHardwareIDInstanceNames = (
self._create_hardware_ids(conn, initiatorNames,
storageSystemName))
if not storageHardwareIDInstanceNames:
LOG.error(_LE(
"Failed to create hardware id(s) on "
"%(storageSystemName)s."),
{'storageSystemName': storageSystemName})
return False
foundInitiatorGroupFromConnector = (
self._create_initiator_Group(
conn, controllerConfigService, igGroupName,
storageHardwareIDInstanceNames, extraSpecs))
storageGroupInstanceName = (
self._get_storage_group_from_masking_view(
conn, maskingViewName, storageSystemName))
portGroupInstanceName = self._get_port_group_from_masking_view(
conn, maskingViewName, storageSystemName)
if (foundInitiatorGroupFromConnector is not None and
storageGroupInstanceName is not None and
portGroupInstanceName is not None):
self._delete_masking_view(
conn, controllerConfigService, maskingViewName,
maskingViewInstanceName, extraSpecs)
newMaskingViewInstanceName = (
self._get_masking_view_instance_name(
conn, controllerConfigService, maskingViewName,
storageGroupInstanceName, portGroupInstanceName,
foundInitiatorGroupFromConnector, extraSpecs))
if newMaskingViewInstanceName is not None:
LOG.debug(
"The old masking view has been replaced: "
"%(maskingViewName)s.",
{'maskingViewName': maskingViewName})
else:
LOG.error(_LE(
"One of the components of the original masking view "
"%(maskingViewName)s cannot be retrieved so "
"please contact your system administrator to check "
"that the correct initiator(s) are part of masking."),
{'maskingViewName': maskingViewName})
return False
return True
def _create_initiator_Group(
self, conn, controllerConfigService, igGroupName,
hardwareIdinstanceNames, extraSpecs):
"""Create a new initiator group.
Given a list of hardwareId Instance name create a new
initiator group.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param igGroupName: the initiator group name (String)
:param hardwareIdinstanceNames: one or more hardware id instance names
:param extraSpecs: extra specifications
:returns: foundInitiatorGroupInstanceName
:raises: VolumeBackendAPIException
"""
rc, job = conn.InvokeMethod(
'CreateGroup', controllerConfigService, GroupName=igGroupName,
Type=self.utils.get_num(INITIATORGROUPTYPE, '16'),
Members=hardwareIdinstanceNames)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Create Group: %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'groupName': igGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
foundInitiatorGroupInstanceName = self._find_new_initiator_group(
conn, job)
return foundInitiatorGroupInstanceName
def _check_ig_rollback(
self, conn, controllerConfigService,
igGroupName, connector, extraSpecs):
"""Check if rollback action is required on an initiator group.
If anything goes wrong on a masking view creation, we need to check if
the process created a now-stale initiator group before failing, i.e.
an initiator group a) matching the name used in the mv process and
b) not associated with any other masking views.
If a stale ig exists, remove the initiators and delete the ig.
:param conn: the ecom connection
:param controllerConfigService: controller config service
:param igGroupName: the initiator group name
:param connector: the connector object
:param extraSpecs: extra specifications
"""
initiatorNames = self._find_initiator_names(conn, connector)
foundInitiatorGroupInstanceName = self._find_initiator_masking_group(
conn, controllerConfigService, initiatorNames)
if foundInitiatorGroupInstanceName:
initiatorGroupInstance = conn.GetInstance(
foundInitiatorGroupInstanceName, LocalOnly=False)
if initiatorGroupInstance['ElementName'] == igGroupName:
host = igGroupName.split("-")[1]
LOG.debug("Searching for masking views associated with "
"%(igGroupName)s",
{'igGroupName': igGroupName})
self._last_volume_delete_initiator_group(
conn, controllerConfigService,
foundInitiatorGroupInstanceName, extraSpecs, host)
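    # Sketch of the naming convention implied by the split above (an
    # assumption drawn from how igGroupName is parsed here and built in
    # _last_volume_delete_initiator_group): driver-created initiator groups
    # are named "OS-<shortHostName>-<protocol>-IG", so
    #
    #   igGroupName = "OS-myhost-ISCSI-IG"
    #   host = igGroupName.split("-")[1]  # -> "myhost"
    #
    # Note that a short host name containing "-" would be truncated by this
    # parsing.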
def _get_port_group_from_masking_view(
self, conn, maskingViewName, storageSystemName):
"""Given the masking view name get the port group from it.
:param conn: connection to the ecom server
:param maskingViewName: the name of the masking view
:param storageSystemName: the storage system name
:returns: instance name foundPortMaskingGroupInstanceName
"""
foundPortMaskingGroupInstanceName = None
foundView = self._find_masking_view(
conn, maskingViewName, storageSystemName)
if foundView:
groups = conn.AssociatorNames(
foundView,
ResultClass='CIM_TargetMaskingGroup')
if len(groups) > 0:
foundPortMaskingGroupInstanceName = groups[0]
LOG.debug(
"Masking view: %(view)s InitiatorMaskingGroup: %(masking)s.",
{'view': maskingViewName,
'masking': foundPortMaskingGroupInstanceName})
return foundPortMaskingGroupInstanceName
def _delete_masking_view(
self, conn, controllerConfigService, maskingViewName,
maskingViewInstanceName, extraSpecs):
"""Delete a masking view.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param maskingViewName: maskingview name (String)
:param maskingViewInstanceName: the masking view instance name
:param extraSpecs: extra specifications
:raises: VolumeBackendAPIException
"""
rc, job = conn.InvokeMethod('DeleteMaskingView',
controllerConfigService,
ProtocolController=maskingViewInstanceName)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Modifying masking view : %(groupName)s. "
"Return code: %(rc)lu. Error: %(error)s.")
% {'groupName': maskingViewName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
def get_masking_view_from_storage_group(
self, conn, storageGroupInstanceName):
"""Get the associated maskingview instance name.
Given storage group instance name, get the associated masking
view instance name.
:param conn: connection to the ecom server
:param storageGroupInstanceName: the storage group instance name
:returns: instance name foundMaskingViewInstanceName
"""
foundMaskingViewInstanceName = None
maskingViews = conn.AssociatorNames(
storageGroupInstanceName,
ResultClass='Symm_LunMaskingView')
if len(maskingViews) > 0:
foundMaskingViewInstanceName = maskingViews[0]
return foundMaskingViewInstanceName
def add_volume_to_storage_group(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, sgGroupName, extraSpecs):
"""Add a volume to an existing storage group.
:param conn: connection to ecom server
:param controllerConfigService: the controller configuration service
:param storageGroupInstanceName: storage group instance name
:param volumeInstance: the volume instance
:param volumeName: the name of the volume (String)
:param sgGroupName: the name of the storage group (String)
:param extraSpecs: additional info
:returns: int -- rc the return code of the job
:returns: dict -- the job dict
"""
self.provision.add_members_to_masking_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance.path, volumeName, extraSpecs)
LOG.info(_LI(
"Added volume: %(volumeName)s to existing storage group "
"%(sgGroupName)s."),
{'volumeName': volumeName,
'sgGroupName': sgGroupName})
def remove_device_from_default_storage_group(
self, conn, controllerConfigService, volumeInstanceName,
volumeName, fastPolicyName, extraSpecs):
"""Remove the volume from the default storage group.
Remove the volume from the default storage group for the FAST
policy and return the default storage group instance name.
:param conn: the connection to the ecom server
:param controllerConfigService: the controller config service
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param fastPolicyName: the fast policy name (String)
:param extraSpecs: additional info
:returns: instance name defaultStorageGroupInstanceName
"""
failedRet = None
defaultStorageGroupInstanceName, defaultSgName = (
self.fast.get_and_verify_default_storage_group(
conn, controllerConfigService, volumeInstanceName,
volumeName, fastPolicyName))
if defaultStorageGroupInstanceName is None:
LOG.warning(_LW(
"Volume %(volumeName)s was not first part of the default "
"storage group for the FAST Policy."),
{'volumeName': volumeName})
return failedRet
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"There are %(length)lu associated with the default storage group "
"for fast before removing volume %(volumeName)s.",
{'length': len(assocVolumeInstanceNames),
'volumeName': volumeName})
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, defaultStorageGroupInstanceName,
volumeInstanceName, volumeName, extraSpecs)
assocVolumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"There are %(length)lu associated with the default storage group "
"for fast after removing volume %(volumeName)s.",
{'length': len(assocVolumeInstanceNames),
'volumeName': volumeName})
# Required for unit tests.
emptyStorageGroupInstanceName = (
self._wrap_get_storage_group_from_volume(conn, volumeInstanceName,
defaultSgName))
if emptyStorageGroupInstanceName is not None:
LOG.error(_LE(
"Failed to remove %(volumeName)s from the default storage "
"group for the FAST Policy."),
{'volumeName': volumeName})
return failedRet
return defaultStorageGroupInstanceName
def _wrap_get_storage_group_from_volume(self, conn, volumeInstanceName,
defaultSgName):
"""Wrapper for get_storage_group_from_volume.
Needed for override in tests.
:param conn: the connection to the ecom server
:param volumeInstanceName: the volume instance name
:param defaultSgName: the default storage group name
:returns: emptyStorageGroupInstanceName
"""
return self.utils.get_storage_group_from_volume(
conn, volumeInstanceName, defaultSgName)
def get_devices_from_storage_group(
self, conn, storageGroupInstanceName):
"""Get the associated volume Instance names.
Given the storage group instance name get the associated volume
Instance names.
:param conn: connection to the ecom server
:param storageGroupInstanceName: the storage group instance name
:returns: list -- volumeInstanceNames list of volume instance names
"""
volumeInstanceNames = conn.AssociatorNames(
storageGroupInstanceName,
ResultClass='EMC_StorageVolume')
return volumeInstanceNames
def get_associated_masking_groups_from_device(
self, conn, volumeInstanceName):
"""Get the associated storage groups from the volume Instance name.
Given the volume instance name get the associated storage group
instance names.
:param conn: connection to the ecom server
:param volumeInstanceName: the volume instance name
:returns: list -- list of storage group instance names
"""
maskingGroupInstanceNames = conn.AssociatorNames(
volumeInstanceName,
ResultClass='CIM_DeviceMaskingGroup',
AssocClass='CIM_OrderedMemberOfCollection')
if len(maskingGroupInstanceNames) > 0:
return maskingGroupInstanceNames
else:
LOG.info(_LI("Volume %(volumeName)s not in any storage group."),
{'volumeName': volumeInstanceName})
return None
def remove_and_reset_members(
self, conn, controllerConfigService, volumeInstance,
volumeName, extraSpecs, connector=None, reset=True):
"""This is called on a delete, unmap device or rollback.
If the connector is not None get the associated SG and remove volume
from the storage group, otherwise it is a VMAX3 deletion.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param volumeInstance: the volume Instance
:param volumeName: the volume name
:param extraSpecs: additional info
:param connector: optional
:param reset: reset, return to original SG (optional)
:returns: storageGroupInstanceName
"""
storageGroupInstanceName = None
if extraSpecs[ISV3]:
self._cleanup_deletion_v3(
conn, controllerConfigService, volumeInstance, extraSpecs)
else:
if connector:
storageGroupInstanceName = (
self._get_sg_associated_with_connector(
conn, controllerConfigService, volumeInstance.path,
volumeName, connector))
if storageGroupInstanceName:
self._remove_volume_from_sg(
conn, controllerConfigService,
storageGroupInstanceName,
volumeInstance, extraSpecs)
else:
LOG.warning(_LW("Cannot get storage from connector."))
if reset:
self._return_back_to_default_sg(
conn, controllerConfigService, volumeInstance, volumeName,
extraSpecs)
return storageGroupInstanceName
def _cleanup_deletion_v3(
self, conn, controllerConfigService, volumeInstance, extraSpecs):
"""Pre cleanup before VMAX3 deletion operation
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param volumeInstance: the volume instance
:param extraSpecs: the extra specifications
"""
storageGroupInstanceNames = (
self.get_associated_masking_groups_from_device(
conn, volumeInstance.path))
if storageGroupInstanceNames:
sgNum = len(storageGroupInstanceNames)
if len(storageGroupInstanceNames) > 1:
LOG.warning(_LW("Volume %(volumeName)s is belong to "
"%(sgNum)s storage groups."),
{'volumeName': volumeInstance['ElementName'],
'sgNum': sgNum})
for storageGroupInstanceName in storageGroupInstanceNames:
self._remove_volume_from_sg(
conn, controllerConfigService,
storageGroupInstanceName,
volumeInstance,
extraSpecs)
def _remove_volume_from_sg(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, extraSpecs):
"""Remove volume from storage group
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param storageGroupInstanceName: the SG instance name
:param volumeInstance: the volume instance
:param extraSpecs: the extra specifications
"""
instance = conn.GetInstance(storageGroupInstanceName, LocalOnly=False)
storageGroupName = instance['ElementName']
@lockutils.synchronized(storageGroupName + 'remove',
"emc-remove-sg", True)
def do_remove_volume_from_sg():
volumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
numVolInStorageGroup = len(volumeInstanceNames)
LOG.debug(
"There are %(numVol)d volumes in the storage group "
"%(maskingGroup)s.",
{'numVol': numVolInStorageGroup,
'maskingGroup': storageGroupInstanceName})
if numVolInStorageGroup == 1:
# Last volume in the storage group.
self._last_vol_in_SG(
conn, controllerConfigService, storageGroupInstanceName,
storageGroupName, volumeInstance,
volumeInstance['ElementName'], extraSpecs)
else:
# Not the last volume so remove it from storage group
self._multiple_vols_in_SG(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeInstance['ElementName'],
numVolInStorageGroup, extraSpecs)
return do_remove_volume_from_sg()
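    # The pattern above -- an inner function decorated with
    # lockutils.synchronized and called immediately -- serialises concurrent
    # removals against the same storage group while leaving other groups
    # free to proceed in parallel. A stripped-down sketch of the idiom
    # (illustrative only):
    #
    #   @lockutils.synchronized(storageGroupName + 'remove',
    #                           "emc-remove-sg", True)
    #   def do_remove_volume_from_sg():
    #       ...  # critical section: count volumes, remove or tear down
    #   return do_remove_volume_from_sg()
    #
    # The lock name is derived from the storage group name, so each group
    # gets its own lock.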
def _last_vol_in_SG(
self, conn, controllerConfigService, storageGroupInstanceName,
storageGroupName, volumeInstance, volumeName, extraSpecs):
"""Steps if the volume is the last in a storage group.
1. Check if the volume is in a masking view.
2. If it is in a masking view, delete the masking view, remove the
initiators from the initiator group and delete the initiator
group if there are no other masking views associated with the
initiator group, remove the volume from the storage group, and
delete the storage group.
3. If it is not in a masking view, remove the volume from the
storage group and delete the storage group.
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param storageGroupInstanceName: the SG instance name
:param storageGroupName: the Storage group name (String)
:param volumeInstance: the volume instance
:param volumeName: the volume name
:param extraSpecs: the extra specifications
"""
status = False
LOG.debug("Only one volume remains in storage group "
"%(sgname)s. Driver will attempt cleanup.",
{'sgname': storageGroupName})
mvInstanceName = self.get_masking_view_from_storage_group(
conn, storageGroupInstanceName)
if mvInstanceName is None:
LOG.debug("Unable to get masking view %(maskingView)s "
"from storage group.",
{'maskingView': mvInstanceName})
# Remove the volume from the storage group and delete the SG.
self._remove_last_vol_and_delete_sg(
conn, controllerConfigService,
storageGroupInstanceName,
storageGroupName, volumeInstance.path,
volumeName, extraSpecs)
status = True
else:
maskingViewInstance = conn.GetInstance(
mvInstanceName, LocalOnly=False)
maskingViewName = maskingViewInstance['ElementName']
@lockutils.synchronized(maskingViewName,
"emc-mv-", True)
def do_delete_mv_ig_and_sg():
return self._delete_mv_ig_and_sg(
conn, controllerConfigService, mvInstanceName,
maskingViewName, storageGroupInstanceName,
storageGroupName, volumeInstance, volumeName,
extraSpecs)
do_delete_mv_ig_and_sg()
status = True
return status
def _multiple_vols_in_SG(
self, conn, controllerConfigService, storageGroupInstanceName,
volumeInstance, volumeName, numVolsInSG, extraSpecs):
"""If the volume is not the last in the storage group
Remove the volume from the SG.
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param storageGroupInstanceName: the SG instance name
:param volumeInstance: the volume instance
:param volumeName: the volume name
:param numVolsInSG: the number of volumes in the SG
:param extraSpecs: the extra specifications
"""
LOG.debug("Start: number of volumes in masking storage group: "
"%(numVol)d", {'numVol': numVolsInSG})
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstance.path, volumeName, extraSpecs)
LOG.debug(
"RemoveMembers for volume %(volumeName)s completed "
"successfully.", {'volumeName': volumeName})
volumeInstanceNames = self.get_devices_from_storage_group(
conn, storageGroupInstanceName)
LOG.debug(
"End: number of volumes in masking storage group: %(numVol)d.",
{'numVol': len(volumeInstanceNames)})
def _delete_mv_ig_and_sg(
self, conn, controllerConfigService, mvInstanceName,
maskingViewName, storageGroupInstanceName, storageGroupName,
volumeInstance, volumeName, extraSpecs):
"""Delete the Masking view, the storage Group and the initiator group.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param mvInstanceName: masking view instance name
:param maskingViewName: masking view name
:param storageGroupInstanceName: storage group instance name
        :param storageGroupName: storage group name
:param volumeInstance: the volume Instance
:param volumeName: the volume name
:param extraSpecs: extra specs
"""
isV3 = extraSpecs[ISV3]
fastPolicyName = extraSpecs.get(FASTPOLICY, None)
host = maskingViewName.split("-")[1]
storageSystemInstanceName = self.utils.find_storage_system(
conn, controllerConfigService)
initiatorGroupInstanceName = (
self.get_initiator_group_from_masking_view(conn, mvInstanceName))
self._last_volume_delete_masking_view(
conn, controllerConfigService, mvInstanceName,
maskingViewName, extraSpecs)
self._last_volume_delete_initiator_group(
conn, controllerConfigService,
initiatorGroupInstanceName, extraSpecs, host)
if not isV3:
isTieringPolicySupported, tierPolicyServiceInstanceName = (
self._get_tiering_info(conn, storageSystemInstanceName,
fastPolicyName))
self._get_and_remove_rule_association(
conn, fastPolicyName,
isTieringPolicySupported,
tierPolicyServiceInstanceName,
storageSystemInstanceName['Name'],
storageGroupInstanceName, extraSpecs)
self._remove_last_vol_and_delete_sg(
conn, controllerConfigService, storageGroupInstanceName,
storageGroupName, volumeInstance.path, volumeName,
extraSpecs)
LOG.debug(
"Volume %(volumeName)s successfully removed from SG and "
"Storage Group %(storageGroupName)s successfully deleted. ",
{'volumeName': volumeName,
'storageGroupName': storageGroupName})
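    # Teardown order implemented above, summarised for reference: delete the
    # masking view first, then the initiator group (only when no other
    # masking views still reference it), drop the FAST rule association on
    # VMAX2, and finally remove the last volume and delete the storage
    # group. The array will typically refuse to delete groups that are still
    # referenced by a masking view, which is why the masking view goes first.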
def _return_back_to_default_sg(
self, conn, controllerConfigService, volumeInstance, volumeName,
extraSpecs):
"""Return volume to default storage group
Moving the volume to the default SG for VMAX3 and
FAST for VMAX2.
:param conn: connection to the ecom server
:param controllerConfigService: the controller configuration service
:param volumeInstance: the volume Instance
:param volumeName: the volume name
:param extraSpecs: extra specs
"""
# Add it back to the default storage group.
if extraSpecs[ISV3]:
self.return_volume_to_default_storage_group_v3(
conn, controllerConfigService,
volumeInstance, volumeName, extraSpecs)
else:
# V2 if FAST POLICY enabled, move the volume to the default
# SG.
fastPolicyName = extraSpecs.get(FASTPOLICY, None)
storageSystemInstanceName = self.utils.find_storage_system(
conn, controllerConfigService)
isTieringPolicySupported, __ = (
self._get_tiering_info(conn, storageSystemInstanceName,
fastPolicyName))
if fastPolicyName is not None and isTieringPolicySupported:
self._cleanup_tiering(
conn, controllerConfigService, fastPolicyName,
volumeInstance, volumeName, extraSpecs)
def _get_sg_associated_with_connector(
self, conn, controllerConfigService, volumeInstanceName,
volumeName, connector):
"""Get storage group associated with connector.
If the connector gets passed then extra logic required to
get storage group.
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param volumeInstanceName: the volume instance name
:param volumeName: the volume name (String)
:param connector: the connector object
:returns: storageGroupInstanceName(can be None)
"""
return self._get_sg_or_mv_associated_with_initiator(
conn, controllerConfigService, volumeInstanceName,
volumeName, connector, True)
def _get_tiering_info(
self, conn, storageSystemInstanceName, fastPolicyName):
"""Get tiering specifics.
:param conn: the ecom connection
:param storageSystemInstanceName: storage system instance name
        :param fastPolicyName: the fast policy name
:returns: boolean -- isTieringPolicySupported
:returns: tierPolicyServiceInstanceName
"""
isTieringPolicySupported = False
tierPolicyServiceInstanceName = None
if fastPolicyName is not None:
tierPolicyServiceInstanceName = self.utils.get_tier_policy_service(
conn, storageSystemInstanceName)
isTieringPolicySupported = self.fast.is_tiering_policy_enabled(
conn, tierPolicyServiceInstanceName)
LOG.debug(
"FAST policy enabled on %(storageSystem)s: %(isSupported)s",
{'storageSystem': storageSystemInstanceName,
'isSupported': isTieringPolicySupported})
return isTieringPolicySupported, tierPolicyServiceInstanceName
def _last_volume_delete_masking_view(
self, conn, controllerConfigService, mvInstanceName,
maskingViewName, extraSpecs):
"""Delete the masking view.
Delete the masking view if the volume is the last one in the
storage group.
:param conn: the ecom connection
:param controllerConfigService: controller config service
:param mvInstanceName: masking view instance name
:param maskingViewName: masking view name
:param extraSpecs: extra specifications
"""
LOG.debug(
"Last volume in the storage group, deleting masking view "
"%(maskingViewName)s.",
{'maskingViewName': maskingViewName})
self._delete_masking_view(
conn, controllerConfigService, maskingViewName,
mvInstanceName, extraSpecs)
mvInstance = self.utils.get_existing_instance(
conn, mvInstanceName)
if mvInstance:
exceptionMessage = (_(
"Masking view %(maskingViewName)s "
"was not deleted successfully") %
{'maskingViewName': maskingViewName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.info(_LI(
"Masking view %(maskingViewName)s successfully deleted."),
{'maskingViewName': maskingViewName})
def _get_and_remove_rule_association(
self, conn, fastPolicyName, isTieringPolicySupported,
tierPolicyServiceInstanceName, storageSystemName,
storageGroupInstanceName, extraSpecs):
"""Remove the storage group from the policy rule.
:param conn: the ecom connection
:param fastPolicyName: the fast policy name
:param isTieringPolicySupported: boolean
:param tierPolicyServiceInstanceName: the tier policy instance name
:param storageSystemName: storage system name
:param storageGroupInstanceName: the storage group instance name
:param extraSpecs: additional info
"""
# Disassociate storage group from FAST policy.
if fastPolicyName is not None and isTieringPolicySupported is True:
tierPolicyInstanceName = self.fast.get_tier_policy_by_name(
conn, storageSystemName, fastPolicyName)
LOG.debug(
"Policy: %(policy)s, policy service:%(service)s, "
"masking group: %(maskingGroup)s.",
{'policy': tierPolicyInstanceName,
'service': tierPolicyServiceInstanceName,
'maskingGroup': storageGroupInstanceName})
self.fast.delete_storage_group_from_tier_policy_rule(
conn, tierPolicyServiceInstanceName,
storageGroupInstanceName, tierPolicyInstanceName, extraSpecs)
def return_volume_to_default_storage_group_v3(
self, conn, controllerConfigurationService,
volumeInstance, volumeName, extraSpecs):
"""Return volume to the default storage group in v3.
:param conn: the ecom connection
:param controllerConfigService: controller config service
:param volumeInstance: volumeInstance
:param volumeName: the volume name
:param extraSpecs: additional info
:raises: VolumeBackendAPIException
"""
storageGroupName = self.utils.get_v3_storage_group_name(
extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO],
extraSpecs[self.utils.WORKLOAD])
storageGroupInstanceName = self.utils.find_storage_masking_group(
conn, controllerConfigurationService, storageGroupName)
if not storageGroupInstanceName:
storageGroupInstanceName = (
self.provisionv3.create_storage_group_v3(
conn, controllerConfigurationService, storageGroupName,
extraSpecs[self.utils.POOL], extraSpecs[self.utils.SLO],
extraSpecs[self.utils.WORKLOAD], extraSpecs))
if not storageGroupInstanceName:
errorMessage = (_("Failed to create storage group "
"%(storageGroupName)s.") %
{'storageGroupName': storageGroupName})
LOG.error(errorMessage)
raise exception.VolumeBackendAPIException(data=errorMessage)
self._add_volume_to_sg_and_verify(
conn, controllerConfigurationService,
storageGroupInstanceName, volumeInstance, volumeName,
storageGroupName, extraSpecs)
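    # Assumed naming sketch: get_v3_storage_group_name() is expected to
    # derive a deterministic default-SG name from the pool, SLO and workload
    # in the extra specs (for example something like
    # "OS-SRP_1-Diamond-DSS-SG"), so repeated calls with the same extra
    # specs resolve to the same group and the create call above only runs
    # when that group is missing.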
def _cleanup_tiering(
self, conn, controllerConfigService, fastPolicyName,
volumeInstance, volumeName, extraSpecs):
"""Clean up tiering.
:param conn: the ecom connection
:param controllerConfigService: the controller configuration service
:param fastPolicyName: the fast policy name
:param volumeInstance: volume instance
:param volumeName: the volume name
:param extraSpecs: additional info
"""
defaultStorageGroupInstanceName = (
self.fast.get_policy_default_storage_group(
conn, controllerConfigService, fastPolicyName))
volumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"Start: number of volumes in default storage group: %(numVol)d.",
{'numVol': len(volumeInstanceNames)})
defaultStorageGroupInstanceName = (
self.fast.add_volume_to_default_storage_group_for_fast_policy(
conn, controllerConfigService, volumeInstance, volumeName,
fastPolicyName, extraSpecs))
# Check default storage group number of volumes.
volumeInstanceNames = self.get_devices_from_storage_group(
conn, defaultStorageGroupInstanceName)
LOG.debug(
"End: number of volumes in default storage group: %(numVol)d.",
{'numVol': len(volumeInstanceNames)})
def get_target_wwns(self, conn, mvInstanceName):
"""Get the DA ports wwns.
:param conn: the ecom connection
:param mvInstanceName: masking view instance name
:returns: list -- the list of target wwns for the masking view
"""
targetWwns = []
targetPortInstanceNames = conn.AssociatorNames(
mvInstanceName,
ResultClass='Symm_FCSCSIProtocolEndpoint')
numberOfPorts = len(targetPortInstanceNames)
if numberOfPorts <= 0:
LOG.warning(_LW("No target ports found in "
"masking view %(maskingView)s."),
                        {'maskingView': mvInstanceName})
for targetPortInstanceName in targetPortInstanceNames:
targetWwns.append(targetPortInstanceName['Name'])
return targetWwns
def get_masking_view_by_volume(self, conn, volumeInstance, connector):
"""Given volume, retrieve the masking view instance name.
:param conn: the ecom connection
:param volumeInstance: the volume instance
:param connector: the connector object
:returns: masking view instance name
"""
storageSystemName = volumeInstance['SystemName']
controllerConfigService = (
self.utils.find_controller_configuration_service(
conn, storageSystemName))
volumeName = volumeInstance['ElementName']
mvInstanceName = (
self._get_sg_or_mv_associated_with_initiator(
conn, controllerConfigService, volumeInstance.path,
volumeName, connector, False))
return mvInstanceName
def get_masking_views_by_port_group(self, conn, portGroupInstanceName):
"""Given port group, retrieve the masking view instance name.
:param conn: the ecom connection
:param portGroupInstanceName: the instance name of the port group
:returns: masking view instance names
"""
mvInstanceNames = conn.AssociatorNames(
portGroupInstanceName, ResultClass='Symm_LunMaskingView')
return mvInstanceNames
def get_masking_views_by_initiator_group(
self, conn, initiatorGroupInstanceName):
"""Given initiator group, retrieve the masking view instance name.
Retrieve the list of masking view instances associated with the
initiator group instance name.
:param conn: the ecom connection
:param initiatorGroupInstanceName: the instance name of the
initiator group
:returns: list of masking view instance names
"""
mvInstanceNames = conn.AssociatorNames(
initiatorGroupInstanceName, ResultClass='Symm_LunMaskingView')
return mvInstanceNames
def get_port_group_from_masking_view(self, conn, maskingViewInstanceName):
"""Get the port group in a masking view.
:param conn: the ecom connection
:param maskingViewInstanceName: masking view instance name
:returns: portGroupInstanceName
"""
portGroupInstanceNames = conn.AssociatorNames(
maskingViewInstanceName, ResultClass='SE_TargetMaskingGroup')
if len(portGroupInstanceNames) > 0:
LOG.debug("Found port group %(pg)s in masking view %(mv)s.",
{'pg': portGroupInstanceNames[0],
'mv': maskingViewInstanceName})
return portGroupInstanceNames[0]
else:
LOG.warning(_LW("No port group found in masking view %(mv)s."),
{'mv': maskingViewInstanceName})
def get_initiator_group_from_masking_view(
self, conn, maskingViewInstanceName):
"""Get initiator group in a masking view.
:param conn: the ecom connection
:param maskingViewInstanceName: masking view instance name
:returns: initiatorGroupInstanceName or None if it is not found
"""
initiatorGroupInstanceNames = conn.AssociatorNames(
maskingViewInstanceName, ResultClass='SE_InitiatorMaskingGroup')
if len(initiatorGroupInstanceNames) > 0:
LOG.debug("Found initiator group %(ig)s in masking view %(mv)s.",
{'ig': initiatorGroupInstanceNames[0],
'mv': maskingViewInstanceName})
return initiatorGroupInstanceNames[0]
else:
LOG.warning(_LW("No Initiator group found in masking view "
"%(mv)s."), {'mv': maskingViewInstanceName})
def _get_sg_or_mv_associated_with_initiator(
self, conn, controllerConfigService, volumeInstanceName,
volumeName, connector, getSG=True):
"""Get storage group or masking view associated with connector.
If the connector gets passed then extra logic required to
get storage group.
:param conn: the ecom connection
:param controllerConfigService: storage system instance name
:param volumeInstanceName: volume instance name
:param volumeName: volume element name
:param connector: the connector object
        :param getSG: True to get storage group; otherwise get masking view
:returns: foundInstanceName(can be None)
"""
foundInstanceName = None
initiatorNames = self._find_initiator_names(conn, connector)
igInstanceNameFromConnector = self._find_initiator_masking_group(
conn, controllerConfigService, initiatorNames)
# Device can be shared by multi-SGs in a multi-host attach case.
storageGroupInstanceNames = (
self.get_associated_masking_groups_from_device(
conn, volumeInstanceName))
LOG.debug("Found storage groups volume "
"%(volumeName)s is in: %(storageGroups)s",
{'volumeName': volumeName,
'storageGroups': storageGroupInstanceNames})
if storageGroupInstanceNames: # not empty
# Get the SG by IGs.
for sgInstanceName in storageGroupInstanceNames:
# Get maskingview from storage group.
mvInstanceName = self.get_masking_view_from_storage_group(
conn, sgInstanceName)
# Get initiator group from masking view.
if mvInstanceName:
LOG.debug("Found masking view associated with SG "
"%(storageGroup)s: %(maskingview)s",
{'maskingview': mvInstanceName,
'storageGroup': sgInstanceName})
igInstanceName = (
self.get_initiator_group_from_masking_view(
conn, mvInstanceName))
LOG.debug("Initiator Group in masking view %(ig)s: "
"IG associated with connector "
"%(igFromConnector)s.",
{'ig': igInstanceName,
'igFromConnector': igInstanceNameFromConnector})
if igInstanceName == igInstanceNameFromConnector:
if getSG is True:
foundInstanceName = sgInstanceName
LOG.debug("Found the storage group associated "
"with initiator %(initiator)s: "
"%(storageGroup)s",
{'initiator': initiatorNames,
'storageGroup': foundInstanceName})
else:
foundInstanceName = mvInstanceName
LOG.debug("Found the masking view associated with "
"initiator %(initiator)s: "
"%(maskingview)s.",
{'initiator': initiatorNames,
'maskingview': foundInstanceName})
break
return foundInstanceName
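    # Lookup chain sketch (a summary of the traversal above, not new
    # behaviour): in a multi-host attach the device may sit in several
    # storage groups, so the code walks
    #
    #   volume -> CIM_DeviceMaskingGroup(s) -> masking view
    #          -> initiator group
    #
    # and returns the storage group (or the masking view, when getSG is
    # False) whose initiator group matches the one derived from the
    # connector.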
def _remove_last_vol_and_delete_sg(self, conn, controllerConfigService,
storageGroupInstanceName,
storageGroupName, volumeInstanceName,
volumeName, extraSpecs):
"""Remove the last volume and delete the storage group
:param conn: the ecom connection
:param controllerConfigService: controller config service
:param storageGroupInstanceName: storage group instance name
:param storageGroupName: storage group name
:param volumeInstanceName: volume instance name
:param volumeName: volume name
        :param extraSpecs: additional info
"""
self.provision.remove_device_from_storage_group(
conn, controllerConfigService, storageGroupInstanceName,
volumeInstanceName, volumeName, extraSpecs)
LOG.debug(
"Remove the last volume %(volumeName)s completed "
"successfully.",
{'volumeName': volumeName})
# Delete storage group.
self._delete_storage_group(conn, controllerConfigService,
storageGroupInstanceName,
storageGroupName, extraSpecs)
storageGroupInstance = self.utils.get_existing_instance(
conn, storageGroupInstanceName)
if storageGroupInstance:
exceptionMessage = (_(
"Storage group %(storageGroupName)s "
"was not deleted successfully") %
{'storageGroupName': storageGroupName})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.info(_LI(
"Storage Group %(storageGroupName)s successfully deleted."),
{'storageGroupName': storageGroupName})
def _delete_storage_group(self, conn, controllerConfigService,
storageGroupInstanceName, storageGroupName,
extraSpecs):
"""Delete empty storage group
:param conn: the ecom connection
:param controllerConfigService: controller config service
:param storageGroupInstanceName: storage group instance name
:param storageGroupName: storage group name
:param extraSpecs: extra specifications
"""
rc, job = conn.InvokeMethod(
'DeleteGroup',
controllerConfigService,
MaskingGroup=storageGroupInstanceName,
Force=True)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Deleting Group: %(storageGroupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'storageGroupName': storageGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
def _delete_initiator_group(self, conn, controllerConfigService,
initiatorGroupInstanceName, initiatorGroupName,
extraSpecs):
"""Delete an initiatorGroup.
        :param conn: connection to the ecom server
        :param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: the initiator group instance name
        :param initiatorGroupName: initiator group name
:param extraSpecs: extra specifications
"""
rc, job = conn.InvokeMethod(
'DeleteGroup',
controllerConfigService,
MaskingGroup=initiatorGroupInstanceName,
Force=True)
if rc != 0:
rc, errordesc = self.utils.wait_for_job_complete(conn, job,
extraSpecs)
if rc != 0:
exceptionMessage = (_(
"Error Deleting Initiator Group: %(initiatorGroupName)s. "
"Return code: %(rc)lu. Error: %(error)s")
% {'initiatorGroupName': initiatorGroupName,
'rc': rc,
'error': errordesc})
LOG.error(exceptionMessage)
raise exception.VolumeBackendAPIException(
data=exceptionMessage)
else:
LOG.debug("Initiator group %(initiatorGroupName)s "
"is successfully deleted.",
{'initiatorGroupName': initiatorGroupName})
else:
LOG.debug("Initiator group %(initiatorGroupName)s "
"is successfully deleted.",
{'initiatorGroupName': initiatorGroupName})
def _delete_storage_hardware_id(self,
conn,
hardwareIdManagementService,
hardwareIdPath):
"""Delete given initiator path
Delete the initiator. Do not rise exception or failure if deletion
fails due to any reasons.
        :param conn: connection to the ecom server
        :param hardwareIdManagementService: hardware id management service
        :param hardwareIdPath: the path of the initiator object
"""
ret = conn.InvokeMethod('DeleteStorageHardwareID',
hardwareIdManagementService,
                                HardwareID=hardwareIdPath)
if ret == 0:
LOG.debug("Deletion of initiator path %(hardwareIdPath)s "
"is successful.", {'hardwareIdPath': hardwareIdPath})
else:
LOG.warning(_LW("Deletion of initiator path %(hardwareIdPath)s "
"is failed."), {'hardwareIdPath': hardwareIdPath})
def _delete_initiators_from_initiator_group(self, conn,
controllerConfigService,
initiatorGroupInstanceName,
initiatorGroupName):
"""Delete initiators
Delete all initiators associated with the initiator group instance.
Cleanup whatever is possible. It will not return any failure or
rise exception if deletion fails due to any reasons.
        :param conn: connection to the ecom server
        :param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: the initiator group instance name
"""
storageHardwareIdInstanceNames = (
conn.AssociatorNames(initiatorGroupInstanceName,
ResultClass='SE_StorageHardwareID'))
if len(storageHardwareIdInstanceNames) == 0:
LOG.debug("No initiators found in Initiator group "
"%(initiatorGroupName)s.",
{'initiatorGroupName': initiatorGroupName})
return
storageSystemName = controllerConfigService['SystemName']
hardwareIdManagementService = (
self.utils.find_storage_hardwareid_service(conn,
storageSystemName))
for storageHardwareIdInstanceName in storageHardwareIdInstanceNames:
initiatorName = storageHardwareIdInstanceName['InstanceID']
hardwareIdPath = storageHardwareIdInstanceName
LOG.debug("Initiator %(initiatorName)s "
"will be deleted from the Initiator group "
"%(initiatorGroupName)s. HardwareIdPath is "
"%(hardwareIdPath)s.",
{'initiatorName': initiatorName,
'initiatorGroupName': initiatorGroupName,
'hardwareIdPath': hardwareIdPath})
self._delete_storage_hardware_id(conn,
hardwareIdManagementService,
hardwareIdPath)
def _last_volume_delete_initiator_group(
self, conn, controllerConfigService,
initiatorGroupInstanceName, extraSpecs, host=None):
"""Delete the initiator group.
Delete the Initiator group if it has been created by the VMAX driver,
and if there are no masking views associated with it.
:param conn: the ecom connection
:param controllerConfigService: controller config service
        :param initiatorGroupInstanceName: the initiator group instance name
:param extraSpecs: extra specifications
:param host: the short name of the host
"""
defaultInitiatorGroupName = None
initiatorGroupInstance = conn.GetInstance(initiatorGroupInstanceName)
initiatorGroupName = initiatorGroupInstance['ElementName']
protocol = self.utils.get_short_protocol_type(self.protocol)
if host:
defaultInitiatorGroupName = ((
"OS-%(shortHostName)s-%(protocol)s-IG"
% {'shortHostName': host,
'protocol': protocol}))
if initiatorGroupName == defaultInitiatorGroupName:
maskingViewInstanceNames = (
self.get_masking_views_by_initiator_group(
conn, initiatorGroupInstanceName))
if len(maskingViewInstanceNames) == 0:
LOG.debug(
"Last volume associated with the initiator group - "
"deleting the associated initiator group "
"%(initiatorGroupName)s.",
{'initiatorGroupName': initiatorGroupName})
self._delete_initiators_from_initiator_group(
conn, controllerConfigService, initiatorGroupInstanceName,
initiatorGroupName)
self._delete_initiator_group(conn, controllerConfigService,
initiatorGroupInstanceName,
initiatorGroupName, extraSpecs)
else:
LOG.warning(_LW("Initiator group %(initiatorGroupName)s is "
"associated with masking views and can't be "
"deleted. Number of associated masking view "
"is: %(nmv)d."),
{'initiatorGroupName': initiatorGroupName,
'nmv': len(maskingViewInstanceNames)})
else:
LOG.warning(_LW("Initiator group %(initiatorGroupName)s was "
"not created by the VMAX driver so will "
"not be deleted by the VMAX driver."),
{'initiatorGroupName': initiatorGroupName})
def _create_hardware_ids(
self, conn, initiatorNames, storageSystemName):
"""Create hardwareIds for initiator(s).
:param conn: the connection to the ecom server
:param initiatorNames: the list of initiator names
:param storageSystemName: the storage system name
:returns: list -- foundHardwareIDsInstanceNames
"""
foundHardwareIDsInstanceNames = []
hardwareIdManagementService = (
self.utils.find_storage_hardwareid_service(
conn, storageSystemName))
for initiatorName in initiatorNames:
hardwareIdInstanceName = (
self.utils.create_storage_hardwareId_instance_name(
conn, hardwareIdManagementService, initiatorName))
LOG.debug(
"Created hardwareId Instance: %(hardwareIdInstanceName)s.",
{'hardwareIdInstanceName': hardwareIdInstanceName})
foundHardwareIDsInstanceNames.append(hardwareIdInstanceName)
return foundHardwareIDsInstanceNames
def _get_port_group_name_from_mv(self, conn, maskingViewName,
storageSystemName):
"""Get the port group name from the masking view.
:param conn: the connection to the ecom server
:param maskingViewName: the masking view name
:param storageSystemName: the storage system name
:returns: String - port group name
String - error message
"""
errorMessage = None
portGroupName = None
portGroupInstanceName = (
self._get_port_group_from_masking_view(
conn, maskingViewName, storageSystemName))
if portGroupInstanceName is None:
LOG.error(_LE(
"Cannot get port group from masking view: "
"%(maskingViewName)s. "),
{'maskingViewName': maskingViewName})
else:
try:
portGroupInstance = (
conn.GetInstance(portGroupInstanceName))
portGroupName = (
portGroupInstance['ElementName'])
except Exception:
LOG.error(_LE(
"Cannot get port group name."))
return portGroupName, errorMessage
| apache-2.0 |
kaze/paasmaker | paasmaker/common/stats/base.py | 2 | 1797 | #
# Paasmaker - Platform as a Service
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
import paasmaker
import tornado.testing
import colander
class BaseStatsConfigurationSchema(colander.MappingSchema):
# No options defined.
pass
class BaseStats(paasmaker.util.plugin.Plugin):
"""
This plugin is used to fetch stats on the node, which are
reported back to the master for informational purposes.
Additionally, these stats are used by scoring plugins to
calculate the "score" for a node. The score is used by
the Pacemaker to rank nodes when determining where to place
applications.
These plugins are called each time the node reports back
to the master node.
"""
MODES = {
paasmaker.util.plugin.MODE.NODE_STATS: None
}
OPTIONS_SCHEMA = BaseStatsConfigurationSchema()
def stats(self, existing_stats, callback):
"""
Alter or insert into the provided existing stats array. Call the callback
with the dictionary once completed.
For example::
def stats(self, existing_stats, callback):
existing_stats['my_stat'] = 1.0
callback(existing_stats)
:arg dict existing_stats: The existing stats. Insert your stats into
this dictionary.
:arg callable callback: The callback to call once done.
"""
raise NotImplementedError("You must implement stats().")
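# Illustrative sketch (not part of the original file): a concrete stats plugin
# would subclass BaseStats and add its own keys to the existing stats
# dictionary before invoking the callback. The 'load_average' key and the use
# of os.getloadavg() below are hypothetical examples, not paasmaker APIs.
#
# import os
#
# class ExampleLoadStats(BaseStats):
#     def stats(self, existing_stats, callback):
#         # One-minute load average; assumes a platform where os.getloadavg()
#         # is available (Linux/macOS).
#         existing_stats['load_average'] = os.getloadavg()[0]
#         callback(existing_stats)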
class BaseStatsTest(tornado.testing.AsyncTestCase):
def setUp(self):
super(BaseStatsTest, self).setUp()
self.configuration = paasmaker.common.configuration.ConfigurationStub(0, ['pacemaker'], io_loop=self.io_loop)
def tearDown(self):
self.configuration.cleanup(self.stop, self.stop)
self.wait()
super(BaseStatsTest, self).tearDown() | mpl-2.0 |
ms-iot/python | cpython/Lib/test/test_normalization.py | 7 | 3162 | from test.support import open_urlresource
import unittest
from http.client import HTTPException
import sys
import os
from unicodedata import normalize, unidata_version
TESTDATAFILE = "NormalizationTest.txt"
TESTDATAURL = "http://www.pythontest.net/unicode/" + unidata_version + "/" + TESTDATAFILE
def check_version(testfile):
hdr = testfile.readline()
return unidata_version in hdr
class RangeError(Exception):
pass
def NFC(str):
return normalize("NFC", str)
def NFKC(str):
return normalize("NFKC", str)
def NFD(str):
return normalize("NFD", str)
def NFKD(str):
return normalize("NFKD", str)
def unistr(data):
data = [int(x, 16) for x in data.split(" ")]
for x in data:
if x > sys.maxunicode:
raise RangeError
return "".join([chr(x) for x in data])
class NormalizationTest(unittest.TestCase):
def test_main(self):
part = None
part1_data = {}
# Hit the exception early
try:
testdata = open_urlresource(TESTDATAURL, encoding="utf-8",
check=check_version)
except (OSError, HTTPException):
self.skipTest("Could not retrieve " + TESTDATAURL)
self.addCleanup(testdata.close)
for line in testdata:
if '#' in line:
line = line.split('#')[0]
line = line.strip()
if not line:
continue
if line.startswith("@Part"):
part = line.split()[0]
continue
try:
c1,c2,c3,c4,c5 = [unistr(x) for x in line.split(';')[:-1]]
except RangeError:
# Skip unsupported characters;
# try at least adding c1 if we are in part1
if part == "@Part1":
try:
c1 = unistr(line.split(';')[0])
except RangeError:
pass
else:
part1_data[c1] = 1
continue
# Perform tests
self.assertTrue(c2 == NFC(c1) == NFC(c2) == NFC(c3), line)
self.assertTrue(c4 == NFC(c4) == NFC(c5), line)
self.assertTrue(c3 == NFD(c1) == NFD(c2) == NFD(c3), line)
self.assertTrue(c5 == NFD(c4) == NFD(c5), line)
self.assertTrue(c4 == NFKC(c1) == NFKC(c2) == \
NFKC(c3) == NFKC(c4) == NFKC(c5),
line)
self.assertTrue(c5 == NFKD(c1) == NFKD(c2) == \
NFKD(c3) == NFKD(c4) == NFKD(c5),
line)
# Record part 1 data
if part == "@Part1":
part1_data[c1] = 1
# Perform tests for all other data
for c in range(sys.maxunicode+1):
X = chr(c)
if X in part1_data:
continue
self.assertTrue(X == NFC(X) == NFD(X) == NFKC(X) == NFKD(X), c)
def test_bug_834676(self):
# Check for bug 834676
normalize('NFC', '\ud55c\uae00')
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
marratj/ansible | lib/ansible/modules/network/f5/bigip_snmp.py | 10 | 7491 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
module: bigip_snmp
short_description: Manipulate general SNMP settings on a BIG-IP.
description:
- Manipulate general SNMP settings on a BIG-IP.
version_added: 2.4
options:
contact:
description:
- Specifies the name of the person who administers the SNMP
service for this system.
agent_status_traps:
description:
- When C(enabled), ensures that the system sends a trap whenever the
SNMP agent starts running or stops running. This is usually enabled
by default on a BIG-IP.
choices:
- enabled
- disabled
agent_authentication_traps:
description:
- When C(enabled), ensures that the system sends authentication warning
traps to the trap destinations. This is usually disabled by default on
a BIG-IP.
choices:
- enabled
- disabled
device_warning_traps:
description:
- When C(enabled), ensures that the system sends device warning traps
to the trap destinations. This is usually enabled by default on a
BIG-IP.
choices:
- enabled
- disabled
location:
description:
- Specifies the description of this system's physical location.
notes:
- Requires the f5-sdk Python package on the host. This is as easy as pip
install f5-sdk.
extends_documentation_fragment: f5
requirements:
- f5-sdk >= 2.2.0
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = '''
- name: Set snmp contact
bigip_snmp:
contact: "Joe User"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "false"
delegate_to: localhost
- name: Set snmp location
bigip_snmp:
location: "US West 1"
password: "secret"
server: "lb.mydomain.com"
user: "admin"
validate_certs: "false"
delegate_to: localhost
'''
RETURN = '''
agent_status_traps:
description: Value that the agent status traps was set to.
returned: changed
type: string
sample: "enabled"
agent_authentication_traps:
description: Value that the authentication status traps was set to.
returned: changed
type: string
sample: "enabled"
device_warning_traps:
description: Value that the warning status traps was set to.
returned: changed
type: string
sample: "enabled"
contact:
description: The new value for the person who administers SNMP on the device.
returned: changed
type: string
sample: Joe User
location:
description: The new value for the system's physical location.
returned: changed
type: string
sample: "US West 1a"
'''
from ansible.module_utils.f5_utils import (
AnsibleF5Client,
AnsibleF5Parameters,
HAS_F5SDK,
F5ModuleError,
iControlUnexpectedHTTPError
)
class Parameters(AnsibleF5Parameters):
api_map = {
'agentTrap': 'agent_status_traps',
'authTrap': 'agent_authentication_traps',
'bigipTraps': 'device_warning_traps',
'sysLocation': 'location',
'sysContact': 'contact'
}
updatables = [
'agent_status_traps', 'agent_authentication_traps',
'device_warning_traps', 'location', 'contact'
]
returnables = [
'agent_status_traps', 'agent_authentication_traps',
'device_warning_traps', 'location', 'contact'
]
api_attributes = [
'agentTrap', 'authTrap', 'bigipTraps', 'sysLocation', 'sysContact'
]
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
class ModuleManager(object):
def __init__(self, client):
self.client = client
self.have = None
self.want = Parameters(self.client.module.params)
self.changes = Parameters()
def _update_changed_options(self):
changed = {}
for key in Parameters.updatables:
if getattr(self.want, key) is not None:
attr1 = getattr(self.want, key)
attr2 = getattr(self.have, key)
if attr1 != attr2:
changed[key] = attr1
if changed:
self.changes = Parameters(changed)
return True
return False
def exec_module(self):
result = dict()
try:
changed = self.update()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.client.check_mode:
return True
self.update_on_device()
return True
def update_on_device(self):
params = self.want.api_params()
result = self.client.api.tm.sys.snmp.load()
result.modify(**params)
def read_current_from_device(self):
resource = self.client.api.tm.sys.snmp.load()
result = resource.attrs
return Parameters(result)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
self.choices = ['enabled', 'disabled']
self.argument_spec = dict(
contact=dict(
required=False,
default=None
),
agent_status_traps=dict(
required=False,
default=None,
choices=self.choices
),
agent_authentication_traps=dict(
required=False,
default=None,
choices=self.choices
),
device_warning_traps=dict(
required=False,
default=None,
choices=self.choices
),
location=dict(
required=False,
default=None
)
)
self.f5_product_name = 'bigip'
def main():
if not HAS_F5SDK:
raise F5ModuleError("The python f5-sdk module is required")
spec = ArgumentSpec()
client = AnsibleF5Client(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
f5_product_name=spec.f5_product_name
)
try:
mm = ModuleManager(client)
results = mm.exec_module()
client.module.exit_json(**results)
except F5ModuleError as e:
client.module.fail_json(msg=str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
harshita-gupta/Harvard-FRSEM-Catalog-2016-17 | flask/lib/python2.7/site-packages/flask/globals.py | 322 | 1645 | # -*- coding: utf-8 -*-
"""
flask.globals
~~~~~~~~~~~~~
Defines all the global objects that are proxies to the current
active context.
:copyright: (c) 2015 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from functools import partial
from werkzeug.local import LocalStack, LocalProxy
_request_ctx_err_msg = '''\
Working outside of request context.
This typically means that you attempted to use functionality that needed
an active HTTP request. Consult the documentation on testing for
information about how to avoid this problem.\
'''
_app_ctx_err_msg = '''\
Working outside of application context.
This typically means that you attempted to use functionality that needed
to interface with the current application object in some way. To solve
this, set up an application context with app.app_context(). See the
documentation for more information.\
'''
def _lookup_req_object(name):
top = _request_ctx_stack.top
if top is None:
raise RuntimeError(_request_ctx_err_msg)
return getattr(top, name)
def _lookup_app_object(name):
top = _app_ctx_stack.top
if top is None:
raise RuntimeError(_app_ctx_err_msg)
return getattr(top, name)
def _find_app():
top = _app_ctx_stack.top
if top is None:
raise RuntimeError(_app_ctx_err_msg)
return top.app
# context locals
_request_ctx_stack = LocalStack()
_app_ctx_stack = LocalStack()
current_app = LocalProxy(_find_app)
request = LocalProxy(partial(_lookup_req_object, 'request'))
session = LocalProxy(partial(_lookup_req_object, 'session'))
g = LocalProxy(partial(_lookup_app_object, 'g'))
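# Illustrative usage (not part of the original file): the proxies defined above
# only resolve once a context has been pushed, e.g.
#
#     from flask import Flask, current_app
#     app = Flask(__name__)
#     with app.app_context():
#         print(current_app.name)   # the proxy now points at 'app'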
| mit |
mxtthias/mcomix | mcomix/file_chooser_base_dialog.py | 2 | 11047 | """filechooser_chooser_base_dialog.py - Custom FileChooserDialog implementations."""
import os
import mimetypes
import fnmatch
import gtk
import pango
from mcomix.preferences import prefs
from mcomix import image_tools
from mcomix import labels
from mcomix import constants
from mcomix import log
from mcomix import thumbnail_tools
mimetypes.init()
class _BaseFileChooserDialog(gtk.Dialog):
"""We roll our own FileChooserDialog because the one in GTK seems
buggy with the preview widget. The <action> argument dictates what type
of filechooser dialog we want (i.e. it is gtk.FILE_CHOOSER_ACTION_OPEN
or gtk.FILE_CHOOSER_ACTION_SAVE).
This is a base class for the _MainFileChooserDialog, the
_LibraryFileChooserDialog and the SimpleFileChooserDialog.
Subclasses should implement a method files_chosen(paths) that will be
called once the filechooser has done its job and selected some files.
If the dialog was closed or Cancel was pressed, <paths> is the empty list.
"""
_last_activated_file = None
def __init__(self, action=gtk.FILE_CHOOSER_ACTION_OPEN):
self._action = action
self._destroyed = False
if action == gtk.FILE_CHOOSER_ACTION_OPEN:
title = _('Open')
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_OPEN, gtk.RESPONSE_OK)
else:
title = _('Save')
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK)
gtk.Dialog.__init__(self, title, None, 0, buttons)
self.set_default_response(gtk.RESPONSE_OK)
self.set_has_separator(False)
self.filechooser = gtk.FileChooserWidget(action=action)
self.filechooser.set_size_request(680, 420)
self.vbox.pack_start(self.filechooser)
self.set_border_width(4)
self.filechooser.set_border_width(6)
self.connect('response', self._response)
self.filechooser.connect('file_activated', self._response,
gtk.RESPONSE_OK)
preview_box = gtk.VBox(False, 10)
preview_box.set_size_request(130, 0)
self._preview_image = gtk.Image()
self._preview_image.set_size_request(130, 130)
preview_box.pack_start(self._preview_image, False, False)
self.filechooser.set_preview_widget(preview_box)
self._namelabel = labels.FormattedLabel(weight=pango.WEIGHT_BOLD,
scale=pango.SCALE_SMALL)
self._namelabel.set_ellipsize(pango.ELLIPSIZE_MIDDLE)
preview_box.pack_start(self._namelabel, False, False)
self._sizelabel = labels.FormattedLabel(scale=pango.SCALE_SMALL)
self._sizelabel.set_ellipsize(pango.ELLIPSIZE_MIDDLE)
preview_box.pack_start(self._sizelabel, False, False)
self.filechooser.set_use_preview_label(False)
preview_box.show_all()
self.filechooser.connect('update-preview', self._update_preview)
self._all_files_filter = self.add_filter(
_('All files'), [], ['*'])
# Determine which types should go into 'All archives' based on
# extractor availability.
mimetypes = constants.ZIP_FORMATS[0] + constants.TAR_FORMATS[0]
patterns = constants.ZIP_FORMATS[1] + constants.TAR_FORMATS[1]
if constants.RAR_AVAILABLE():
mimetypes += constants.RAR_FORMATS[0]
patterns += constants.RAR_FORMATS[1]
if constants.SZIP_AVAILABLE():
mimetypes += constants.SZIP_FORMATS[0]
patterns += constants.SZIP_FORMATS[1]
if constants.LHA_AVAILABLE():
mimetypes += constants.LHA_FORMATS[0]
patterns += constants.LHA_FORMATS[1]
self.add_filter(_('All Archives'),
mimetypes, patterns)
self.add_filter(_('ZIP archives'),
*constants.ZIP_FORMATS)
self.add_filter(_('Tar archives'),
*constants.TAR_FORMATS)
if constants.RAR_AVAILABLE():
self.add_filter(_('RAR archives'),
*constants.RAR_FORMATS)
if constants.SZIP_AVAILABLE():
self.add_filter(_('7z archives'),
*constants.SZIP_FORMATS)
if constants.LHA_AVAILABLE():
self.add_filter(_('LHA archives'),
*constants.LHA_FORMATS)
try:
current_file = self._current_file()
last_file = self.__class__._last_activated_file
# If a file is currently open, use its path
if current_file and os.path.exists(current_file):
self.filechooser.set_current_folder(os.path.dirname(current_file))
# If no file is open, use the last stored file
elif (last_file and os.path.exists(last_file)):
self.filechooser.set_filename(last_file)
# If no file was stored yet, fall back to preferences
elif os.path.isdir(prefs['path of last browsed in filechooser']):
self.filechooser.set_current_folder(
prefs['path of last browsed in filechooser'])
except Exception, ex: # E.g. broken prefs values.
log.debug(ex)
self.show_all()
def add_filter(self, name, mimes, patterns=[]):
"""Add a filter, called <name>, for each mime type in <mimes> and
each pattern in <patterns> to the filechooser.
"""
ffilter = gtk.FileFilter()
ffilter.add_custom(
gtk.FILE_FILTER_FILENAME|gtk.FILE_FILTER_MIME_TYPE,
self._filter, (patterns, mimes))
ffilter.set_name(name)
self.filechooser.add_filter(ffilter)
return ffilter
def _filter(self, filter_info, data):
""" Callback function used to determine if a file
should be filtered or not. C{data} is a tuple containing
(patterns, mimes) that should pass the test. Returns True
if the file passed in C{filter_info} should be displayed. """
path, uri, display, mime = filter_info
match_patterns, match_mimes = data
matches_mime = bool(filter(
lambda match_mime: match_mime == mime,
match_mimes))
matches_pattern = bool(filter(
lambda match_pattern: fnmatch.fnmatch(path, match_pattern),
match_patterns))
return matches_mime or matches_pattern
def collect_files_from_subdir(self, path, filter, recursive=False):
""" Finds archives within C{path} that match the
L{gtk.FileFilter} passed in C{filter}. """
for root, dirs, files in os.walk(path):
for file in files:
full_path = os.path.join(root, file)
mimetype = mimetypes.guess_type(full_path)[0] or 'application/octet-stream'
if (filter == self._all_files_filter or
filter.filter((full_path.encode('utf-8'),
None, None, mimetype))):
yield full_path
if not recursive:
break
def set_save_name(self, name):
self.filechooser.set_current_name(name)
def set_current_directory(self, path):
self.filechooser.set_current_folder(path)
def should_open_recursive(self):
return False
def _response(self, widget, response):
"""Return a list of the paths of the chosen files, or None if the
event only changed the current directory.
"""
if response == gtk.RESPONSE_OK:
if not self.filechooser.get_filenames():
return
# Collect files, if necessary also from subdirectories
filter = self.filechooser.get_filter()
paths = [ ]
for path in self.filechooser.get_filenames():
path = path.decode('utf-8')
if os.path.isdir(path):
paths.extend(self.collect_files_from_subdir(path, filter,
self.should_open_recursive()))
else:
paths.append(path)
# FileChooser.set_do_overwrite_confirmation() doesn't seem to
# work on our custom dialog, so we use a simple alternative.
first_path = self.filechooser.get_filenames()[0].decode('utf-8')
if (self._action == gtk.FILE_CHOOSER_ACTION_SAVE and
not os.path.isdir(first_path) and
os.path.exists(first_path)):
overwrite_dialog = gtk.MessageDialog(None, 0,
gtk.MESSAGE_QUESTION, gtk.BUTTONS_OK_CANCEL,
_("A file named '%s' already exists. Do you want to replace it?") %
os.path.basename(first_path))
overwrite_dialog.format_secondary_text(
_('Replacing it will overwrite its contents.'))
response = overwrite_dialog.run()
overwrite_dialog.destroy()
if response != gtk.RESPONSE_OK:
self.emit_stop_by_name('response')
return
prefs['path of last browsed in filechooser'] = \
self.filechooser.get_current_folder()
self.__class__._last_activated_file = first_path
self.files_chosen(paths)
else:
self.files_chosen([])
self._destroyed = True
def _update_preview(self, *args):
if self.filechooser.get_preview_filename():
path = self.filechooser.get_preview_filename().decode('utf-8')
else:
path = None
if path and os.path.isfile(path):
thumbnailer = thumbnail_tools.Thumbnailer()
thumbnailer.set_size(128, 128)
thumbnailer.thumbnail_finished += self._preview_thumbnail_finished
thumbnailer.thumbnail(path, async=True)
else:
self._preview_image.clear()
self._namelabel.set_text('')
self._sizelabel.set_text('')
def _preview_thumbnail_finished(self, filepath, pixbuf):
""" Called when the thumbnailer has finished creating
the thumbnail for <filepath>. """
if self._destroyed:
return
current_path = self.filechooser.get_preview_filename()
if current_path and current_path.decode('utf-8') == filepath:
if pixbuf is None:
self._preview_image.clear()
self._namelabel.set_text('')
self._sizelabel.set_text('')
else:
pixbuf = image_tools.add_border(pixbuf, 1)
self._preview_image.set_from_pixbuf(pixbuf)
self._namelabel.set_text(os.path.basename(filepath))
self._sizelabel.set_text(
'%.1f KiB' % (os.stat(filepath).st_size / 1024.0))
def _current_file(self):
# XXX: This method defers the import of main to avoid cyclic imports
# during startup.
from mcomix import main
return main.main_window().filehandler.get_path_to_base()
# vim: expandtab:sw=4:ts=4
| gpl-2.0 |
n1ck3/dzentinel | lib/python3.3/site-packages/setuptools/tests/test_markerlib.py | 449 | 2506 | import os
import unittest
from setuptools.tests.py26compat import skipIf
try:
import ast
except ImportError:
pass
class TestMarkerlib(unittest.TestCase):
@skipIf('ast' not in globals(),
"ast not available (Python < 2.6?)")
def test_markers(self):
from _markerlib import interpret, default_environment, compile
os_name = os.name
self.assertTrue(interpret(""))
self.assertTrue(interpret("os.name != 'buuuu'"))
self.assertTrue(interpret("os_name != 'buuuu'"))
self.assertTrue(interpret("python_version > '1.0'"))
self.assertTrue(interpret("python_version < '5.0'"))
self.assertTrue(interpret("python_version <= '5.0'"))
self.assertTrue(interpret("python_version >= '1.0'"))
self.assertTrue(interpret("'%s' in os.name" % os_name))
self.assertTrue(interpret("'%s' in os_name" % os_name))
self.assertTrue(interpret("'buuuu' not in os.name"))
self.assertFalse(interpret("os.name == 'buuuu'"))
self.assertFalse(interpret("os_name == 'buuuu'"))
self.assertFalse(interpret("python_version < '1.0'"))
self.assertFalse(interpret("python_version > '5.0'"))
self.assertFalse(interpret("python_version >= '5.0'"))
self.assertFalse(interpret("python_version <= '1.0'"))
self.assertFalse(interpret("'%s' not in os.name" % os_name))
self.assertFalse(interpret("'buuuu' in os.name and python_version >= '5.0'"))
self.assertFalse(interpret("'buuuu' in os_name and python_version >= '5.0'"))
environment = default_environment()
environment['extra'] = 'test'
self.assertTrue(interpret("extra == 'test'", environment))
self.assertFalse(interpret("extra == 'doc'", environment))
def raises_nameError():
try:
interpret("python.version == '42'")
except NameError:
pass
else:
raise Exception("Expected NameError")
raises_nameError()
def raises_syntaxError():
try:
interpret("(x for x in (4,))")
except SyntaxError:
pass
else:
raise Exception("Expected SyntaxError")
raises_syntaxError()
statement = "python_version == '5'"
self.assertEqual(compile(statement).__doc__, statement)
| gpl-2.0 |
hugochan/KATE | run_doc_word2vec.py | 1 | 1505 | '''
Created on Jan, 2017
@author: hugo
'''
from __future__ import absolute_import
import argparse
from os import path
import numpy as np
from autoencoder.preprocessing.preprocessing import load_corpus
from autoencoder.baseline.doc_word2vec import load_w2v, doc_word2vec, get_similar_words
from autoencoder.utils.io_utils import write_file
from autoencoder.utils.op_utils import revdict
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--corpus', required=True, type=str, help='path to the corpus file')
parser.add_argument('-mf', '--mod_file', required=True, type=str, help='path to the word2vec mod file')
parser.add_argument('-sw', '--sample_words', type=str, help='path to the output sample words file')
parser.add_argument('-o', '--output', type=str, help='path to the output doc codes file')
args = parser.parse_args()
corpus = load_corpus(args.corpus)
docs, vocab_dict = corpus['docs'], corpus['vocab']
w2v = load_w2v(args.mod_file)
# doc_codes = doc_word2vec(w2v, docs, revdict(vocab_dict), args.output, avg=True)
if args.sample_words:
queries = ['weapon', 'christian', 'compani', 'israel', 'law', 'hockey', 'comput', 'space']
words = []
for each in queries:
words.append(get_similar_words(w2v, each, topn=5))
write_file(words, args.sample_words)
print 'Saved sample words file to %s' % args.sample_words
import pdb;pdb.set_trace()
if __name__ == '__main__':
main()
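# Illustrative invocation (not part of the original file; the file paths are
# hypothetical):
#   python run_doc_word2vec.py --corpus data/corpus.p -mf models/w2v.mod \
#       -sw output/sample_words.txt -o output/doc_codes.txt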
| bsd-3-clause |
arunjitsingh/playlist | storage/models.py | 1 | 1433 | # Copyright 2012 Arunjit Singh. All Rights Reserved.
"""Application models.
There are 3 basic models:
Song: duh.
Playlist: Stores a list of references to Song (a la "collection")
User: Stores a list of references to playlist (a la "collection")
Each model has an ancestor, which is its immediate parent collection:
Song ----ancestor---> Playlist ----ancestor----> User
The 'user_id' (str) stored by the User model is App Engine's user.user_id()
"""
__author__ = 'Arunjit Singh <[email protected]>'
from google.appengine.ext import ndb
class User(ndb.Model):
user_id = ndb.StringProperty(required=True)
playlists = ndb.ListProperty(ndb.Key, default=[])
class Playlist(ndb.Model):
playlist_id = ndb.StringProperty(required=True)
songs = ndb.ListProperty(ndb.Key, default=[])
user = ndb.ReferenceProperty(User)
class Song(ndb.Model):
song_id = ndb.StringProperty(required=True)
artist = ndb.StringProperty()
title = ndb.StringProperty()
album = ndb.StringProperty()
year = ndb.StringProperty() # or IntegerProperty?
duration = ndb.IntegerProperty() # or StringProperty?
blob_ref = ndb.BlobProperty()
playlist = ndb.ReferenceProperty(Playlist)
# Alias some common db stuff
GqlQuery = ndb.GqlQuery
Query = ndb.Query
QueryNotFoundError = ndb.QueryNotFoundError
ReferencePropertyResolveError = ndb.ReferencePropertyResolveError
# TODO(arunjit): add defs to get Songs/Playlists for a user.
| mit |
jlmadurga/microbot | permabots/test/factories/handler.py | 2 | 1121 | # coding=utf-8
from factory import DjangoModelFactory, SubFactory, Sequence
from permabots.models import Handler, Request, UrlParam, HeaderParam
from permabots.test.factories import BotFactory, ResponseFactory
class RequestFactory(DjangoModelFactory):
class Meta:
model = Request
url_template = "https://api.github.com/users/jlmadurga"
method = Request.GET
class UrlParamFactory(DjangoModelFactory):
class Meta:
model = UrlParam
key = Sequence(lambda n: 'key%d' % n)
value_template = Sequence(lambda n: '{{value%d}}' % n)
request = SubFactory(RequestFactory)
class HeaderParamFactory(DjangoModelFactory):
class Meta:
model = HeaderParam
key = Sequence(lambda n: 'key%d' % n)
value_template = Sequence(lambda n: '{{value%d}}' % n)
request = SubFactory(RequestFactory)
class HandlerFactory(DjangoModelFactory):
class Meta:
model = Handler
bot = SubFactory(BotFactory)
name = Sequence(lambda n: 'name%d' % n)
pattern = "/github_user"
request = SubFactory(RequestFactory)
response = SubFactory(ResponseFactory) | bsd-3-clause |
abhishek-ch/hue | desktop/core/ext-py/South-1.0.2/south/db/oracle.py | 91 | 14581 | from __future__ import print_function
import os.path
import sys
import re
import warnings
import cx_Oracle
from django.db import connection, models
from django.db.backends.util import truncate_name
from django.core.management.color import no_style
from django.db.models.fields import NOT_PROVIDED
from django.db.utils import DatabaseError
# In revision r16016 the function get_sequence_name was transformed into a
# method of the DatabaseOperations class. To make the code backward-compatible
# we need to handle both situations.
try:
from django.db.backends.oracle.base import get_sequence_name\
as original_get_sequence_name
except ImportError:
original_get_sequence_name = None
from south.db import generic
class DatabaseOperations(generic.DatabaseOperations):
"""
Oracle implementation of database operations.
"""
backend_name = 'oracle'
alter_string_set_type = 'ALTER TABLE %(table_name)s MODIFY %(column)s %(type)s %(nullity)s;'
alter_string_set_default = 'ALTER TABLE %(table_name)s MODIFY %(column)s DEFAULT %(default)s;'
alter_string_update_nulls_to_default = \
'UPDATE %(table_name)s SET %(column)s = %(default)s WHERE %(column)s IS NULL;'
add_column_string = 'ALTER TABLE %s ADD %s;'
delete_column_string = 'ALTER TABLE %s DROP COLUMN %s;'
add_constraint_string = 'ALTER TABLE %(table_name)s ADD CONSTRAINT %(constraint)s %(clause)s'
allows_combined_alters = False
has_booleans = False
constraints_dict = {
'P': 'PRIMARY KEY',
'U': 'UNIQUE',
'C': 'CHECK',
'R': 'FOREIGN KEY'
}
def get_sequence_name(self, table_name):
if original_get_sequence_name is None:
return self._get_connection().ops._get_sequence_name(table_name)
else:
return original_get_sequence_name(table_name)
#TODO: This will cause very obscure bugs if anyone uses a column name or string value
# that looks like a column definition (with 'CHECK', 'DEFAULT' and/or 'NULL' in it)
# e.g. "CHECK MATE" varchar(10) DEFAULT 'NULL'
def adj_column_sql(self, col):
# Syntax fixes -- Oracle is picky about clause order
col = re.sub('(?P<constr>CHECK \(.*\))(?P<any>.*)(?P<default>DEFAULT \d+)',
lambda mo: '%s %s%s'%(mo.group('default'), mo.group('constr'), mo.group('any')), col) #syntax fix for boolean/integer field only
col = re.sub('(?P<not_null>(NOT )?NULL) (?P<misc>(.* )?)(?P<default>DEFAULT.+)',
lambda mo: '%s %s %s'%(mo.group('default'),mo.group('not_null'),mo.group('misc') or ''), col) #fix order of NULL/NOT NULL and DEFAULT
return col
def check_meta(self, table_name):
return table_name in [ m._meta.db_table for m in models.get_models() ] #caching provided by Django
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
@generic.invalidate_table_constraints
def create_table(self, table_name, fields):
qn = self.quote_name(table_name)
columns = []
autoinc_sql = ''
for field_name, field in fields:
field = self._field_sanity(field)
# avoid default values in CREATE TABLE statements (#925)
field._suppress_default = True
col = self.column_sql(table_name, field_name, field)
if not col:
continue
col = self.adj_column_sql(col)
columns.append(col)
if isinstance(field, models.AutoField):
autoinc_sql = connection.ops.autoinc_sql(table_name, field_name)
sql = 'CREATE TABLE %s (%s);' % (qn, ', '.join([col for col in columns]))
self.execute(sql)
if autoinc_sql:
self.execute(autoinc_sql[0])
self.execute(autoinc_sql[1])
@generic.invalidate_table_constraints
def delete_table(self, table_name, cascade=True):
qn = self.quote_name(table_name)
# Note: PURGE is not valid syntax for Oracle 9i (it was added in 10)
if cascade:
self.execute('DROP TABLE %s CASCADE CONSTRAINTS;' % qn)
else:
self.execute('DROP TABLE %s;' % qn)
# If the table has an AutoField a sequence was created.
sequence_sql = """
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.get_sequence_name(table_name)}
self.execute(sequence_sql)
@generic.invalidate_table_constraints
def alter_column(self, table_name, name, field, explicit_name=True, ignore_constraints=False):
if self.dry_run:
if self.debug:
print(' - no dry run output for alter_column() due to dynamic DDL, sorry')
return
qn = self.quote_name(table_name)
# hook for the field to do any resolution prior to it's attributes being queried
if hasattr(field, 'south_init'):
field.south_init()
field = self._field_sanity(field)
# Add _id or whatever if we need to
field.set_attributes_from_name(name)
if not explicit_name:
name = field.column
qn_col = self.quote_name(name)
# First, change the type
# This will actually also add any CHECK constraints needed,
# since e.g. 'type' for a BooleanField is 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))'
params = {
'table_name':qn,
'column': qn_col,
'type': self._db_type_for_alter_column(field),
'nullity': 'NOT NULL',
'default': 'NULL'
}
if field.null:
params['nullity'] = 'NULL'
sql_templates = [
(self.alter_string_set_type, params, []),
(self.alter_string_set_default, params, []),
]
if not field.null and field.has_default():
# Use default for rows that had nulls. To support the case where
# the new default does not fit the old type, we need to first change
# the column type to the new type, but null=True; then set the default;
# then complete the type change.
def change_params(**kw):
"A little helper for non-destructively changing the params"
p = params.copy()
p.update(kw)
return p
sql_templates[:0] = [
(self.alter_string_set_type, change_params(nullity='NULL'),[]),
(self.alter_string_update_nulls_to_default, change_params(default="%s"), [field.get_default()]),
]
if not ignore_constraints:
# drop CHECK constraints. Make sure this is executed before the ALTER TABLE statements
# generated above, since those statements recreate the constraints we delete here.
check_constraints = self._constraints_affecting_columns(table_name, [name], "CHECK")
for constraint in check_constraints:
self.execute(self.delete_check_sql % {
'table': self.quote_name(table_name),
'constraint': self.quote_name(constraint),
})
# Drop foreign constraints
try:
self.delete_foreign_key(qn, qn_col)
except ValueError:
# There weren't any
pass
for sql_template, params, args in sql_templates:
try:
self.execute(sql_template % params, args, print_all_errors=False)
except DatabaseError as exc:
description = str(exc)
# Oracle complains if a column is already NULL/NOT NULL
if 'ORA-01442' in description or 'ORA-01451' in description:
# so we just drop NULL/NOT NULL part from target sql and retry
params['nullity'] = ''
sql = sql_template % params
self.execute(sql)
# Oracle also has issues if we try to change a regular column
# to a LOB or vice versa (also REF, object, VARRAY or nested
# table, but these don't come up much in Django apps)
elif 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_column_lob_workaround(table_name, name, field)
else:
self._print_sql_error(exc, sql_template % params)
raise
if not ignore_constraints:
# Add back FK constraints if needed
if field.rel: #and self.supports_foreign_keys:
self.add_deferred_sql(
self.foreign_key_sql(
qn[1:-1], # foreign_key_sql uses this as part of constraint name
qn_col[1:-1], # foreign_key_sql uses this as part of constraint name
field.rel.to._meta.db_table,
field.rel.to._meta.get_field(field.rel.field_name).column
)
)
def _alter_column_lob_workaround(self, table_name, name, field):
"""
Oracle refuses to change a column type from/to LOB to/from a regular
column. In Django, this shows up when the field is changed from/to
a TextField.
What we need to do instead is:
- Rename the original column
- Add the desired field as new
- Update the table to transfer values from old to new
- Drop old column
"""
renamed = self._generate_temp_name(name)
self.rename_column(table_name, name, renamed)
self.add_column(table_name, name, field, keep_default=False)
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(table_name),
self.quote_name(name),
self.quote_name(renamed),
))
self.delete_column(table_name, renamed)
def _generate_temp_name(self, for_name):
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
@generic.copy_column_constraints #TODO: Appears to be nulled by the delete decorator below...
@generic.delete_column_constraints
def rename_column(self, table_name, old, new):
if old == new:
# Short-circuit out
return []
self.execute('ALTER TABLE %s RENAME COLUMN %s TO %s;' % (
self.quote_name(table_name),
self.quote_name(old),
self.quote_name(new),
))
@generic.invalidate_table_constraints
def add_column(self, table_name, name, field, keep_default=False):
field = self._field_sanity(field)
sql = self.column_sql(table_name, name, field)
sql = self.adj_column_sql(sql)
if sql:
params = (
self.quote_name(table_name),
sql
)
sql = self.add_column_string % params
self.execute(sql)
# Now, drop the default if we need to
if field.default is not None:
field.default = NOT_PROVIDED
self.alter_column(table_name, name, field, explicit_name=False, ignore_constraints=True)
def delete_column(self, table_name, name):
return super(DatabaseOperations, self).delete_column(self.quote_name(table_name), name)
def lookup_constraint(self, db_name, table_name, column_name=None):
if column_name:
# Column names in the constraint cache come from the database,
# make sure we use the properly shortened/uppercased version
# for lookup.
column_name = self.normalize_name(column_name)
return super(DatabaseOperations, self).lookup_constraint(db_name, table_name, column_name)
def _constraints_affecting_columns(self, table_name, columns, type="UNIQUE"):
if columns:
columns = [self.normalize_name(c) for c in columns]
return super(DatabaseOperations, self)._constraints_affecting_columns(table_name, columns, type)
def _field_sanity(self, field):
"""
This particular override stops us sending DEFAULTs for BooleanField.
"""
if isinstance(field, models.BooleanField) and field.has_default():
field.default = int(field.to_python(field.get_default()))
# On Oracle, empty strings are null
if isinstance(field, (models.CharField, models.TextField)):
field.null = field.empty_strings_allowed
return field
def _default_value_workaround(self, value):
from datetime import date,time,datetime
if isinstance(value, (date,time,datetime)):
return "'%s'" % value
else:
return super(DatabaseOperations, self)._default_value_workaround(value)
def _fill_constraint_cache(self, db_name, table_name):
self._constraint_cache.setdefault(db_name, {})
self._constraint_cache[db_name][table_name] = {}
rows = self.execute("""
SELECT user_cons_columns.constraint_name,
user_cons_columns.column_name,
user_constraints.constraint_type
FROM user_constraints
JOIN user_cons_columns ON
user_constraints.table_name = user_cons_columns.table_name AND
user_constraints.constraint_name = user_cons_columns.constraint_name
WHERE user_constraints.table_name = '%s'
""" % self.normalize_name(table_name))
for constraint, column, kind in rows:
self._constraint_cache[db_name][table_name].setdefault(column, set())
self._constraint_cache[db_name][table_name][column].add((self.constraints_dict[kind], constraint))
return
| apache-2.0 |
dbreen/games | games/utils/decorators.py | 1 | 1299 | from functools import wraps
from django.http import HttpResponse
from django.template.context import RequestContext
from django.shortcuts import render_to_response
from django.utils import simplejson
def render_to(template=None, mimetype=None):
def renderer(function):
@wraps(function)
def wrapper(request, *args, **kwargs):
output = function(request, *args, **kwargs)
if not isinstance(output, dict):
return output
tmpl = output.pop('TEMPLATE', template)
return render_to_response(tmpl, output,
context_instance=RequestContext(request), mimetype=mimetype)
return wrapper
return renderer
class JsonResponse(HttpResponse):
"""
    HttpResponse descendant, which returns a response with ``application/json`` mimetype.
"""
def __init__(self, data):
super(JsonResponse, self).__init__(content=simplejson.dumps(data), mimetype='application/json')
def ajax_request(func):
@wraps(func)
def wrapper(request, *args, **kwargs):
response = func(request, *args, **kwargs)
if isinstance(response, dict):
return JsonResponse(response)
else:
return response
return wrapper
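# Illustrative usage (not part of the original file): view names, template
# paths and dictionary keys below are hypothetical.
#
# @render_to('games/index.html')
# def index(request):
#     return {'games': ['chess', 'go']}
#
# @ajax_request
# def scores(request):
#     return {'high_score': 42}   # serialized to JSON via JsonResponse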
| mit |
amatyja/AliPhysics | PWGMM/MC/aligenqa/aligenqa/settings.py | 41 | 1127 | # A list of the estimators which will be included in the plots (if available in the input file)
considered_ests = [
'EtaLt05',
'EtaLt08',
'EtaLt15',
'Eta08_15',
'V0M',
# 'V0A',
'V0C',
'ZDC',
'nMPI',
'Q2',
'spherocity',
'sphericity'
]
# Considered triggers
considered_triggers = [
'Inel',
'InelGt0',
'V0AND'
]
# Ranges of percentiles which should be considered in the plots. These
# ranges are then translated into ranges of bins in multiplicity, nMPI,
# or whatever is applicable.
std_perc_bins = [(1, 0.7), (.5, .4), (.1, .05), (0.001, 0.0)]
percentile_bins = {
'EtaLt05': std_perc_bins,
'EtaLt08': std_perc_bins,
'EtaLt15': std_perc_bins,
'Eta08_15': std_perc_bins,
'V0M': std_perc_bins,
'V0A': std_perc_bins,
'V0C': std_perc_bins,
'ZDC': [(1, 0.7), (.7, .3), (.3, .05), (0.001, 0.0)],
'nMPI': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
'Q2': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
'spherocity': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
'sphericity': [(1, 0.7), (.7, .4), (.3, .05), (0.001, 0.0)],
}
| bsd-3-clause |
jonparrott/gcloud-python | storage/noxfile.py | 2 | 3569 | # Copyright 2016 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import os
import nox
LOCAL_DEPS = (
os.path.join('..', 'api_core'),
os.path.join('..', 'core'),
)
@nox.session
def default(session):
"""Default unit test session.
This is intended to be run **without** an interpreter set, so
that the current ``python`` (on the ``PATH``) or the version of
Python corresponding to the ``nox`` binary the ``PATH`` can
run the tests.
"""
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest', 'pytest-cov')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
session.install('-e', '.')
# Run py.test against the unit tests.
session.run(
'py.test',
'--quiet',
'--cov=google.cloud.storage',
'--cov=tests.unit',
'--cov-append',
'--cov-config=.coveragerc',
'--cov-report=',
'--cov-fail-under=97',
'tests/unit',
*session.posargs
)
@nox.session(python=['2.7', '3.5', '3.6', '3.7'])
def unit(session):
"""Run the unit test suite."""
default(session)
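# Illustrative note (not part of the original file): individual sessions can be
# selected from the command line, e.g. `nox -s lint` or `nox -s unit`; the
# exact names of the parametrized Python sessions depend on the nox release.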
@nox.session(python=['2.7', '3.6'])
def system(session):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Use pre-release gRPC for system tests.
session.install('--pre', 'grpcio')
# Install all test dependencies, then install local packages in-place.
session.install('mock', 'pytest')
for local_dep in LOCAL_DEPS:
session.install('-e', local_dep)
systest_deps = [
'../test_utils/',
'../pubsub',
'../kms',
]
for systest_dep in systest_deps:
session.install('-e', systest_dep)
session.install('-e', '.')
# Run py.test against the system tests.
session.run('py.test', '--quiet', 'tests/system.py', *session.posargs)
@nox.session(python='3.6')
def lint(session):
"""Run linters.
Returns a failure if the linters find linting errors or sufficiently
serious code quality issues.
"""
session.install('flake8', *LOCAL_DEPS)
session.install('.')
session.run('flake8', 'google', 'tests')
@nox.session(python='3.6')
def lint_setup_py(session):
"""Verify that setup.py is valid (including RST check)."""
session.install('docutils', 'Pygments')
session.run(
'python', 'setup.py', 'check', '--restructuredtext', '--strict')
@nox.session(python='3.6')
def cover(session):
"""Run the final coverage report.
This outputs the coverage report aggregating coverage from the unit
test runs (not system test runs), and then erases coverage data.
"""
session.install('coverage', 'pytest-cov')
session.run('coverage', 'report', '--show-missing', '--fail-under=100')
session.run('coverage', 'erase')
| apache-2.0 |
timothycrosley/connectable | connectable/base.py | 1 | 5518 | """connectable/Connectable.py
Connectable enables child object to create dynamic connections
(via signals/slots) at run-time. Inspired by QT's signal / slot mechanism
"""
class CombineSignals(type):
'''A meta class to automatically combine signals from base classes'''
def __new__(metaclass, name, parents, class_dict, *kargs, **kwargs):
if 'signals' in class_dict and parents and getattr(parents[0], 'signals', None):
class_dict['signals'] = parents[0].signals + class_dict['signals']
return super(CombineSignals, metaclass).__new__(metaclass, name, parents, class_dict, *kargs, **kwargs)
class Connectable(object, metaclass=CombineSignals):
__slots__ = ("connections")
signals = ()
def emit(self, signal, value=None, gather=False):
"""Emits a signal, causing all slot methods connected with the signal to be called (optionally w/ related value)
signal: the name of the signal to emit, must be defined in the classes 'signals' list.
value: the value to pass to all connected slot methods.
gather: if set, causes emit to return a list of all slot results
"""
results = [] if gather else True
if hasattr(self, 'connections') and signal in self.connections:
for condition, values in self.connections[signal].items():
if condition is None or condition == value or (callable(condition) and condition(value)):
for slot, transform in values.items():
if transform is not None:
if callable(transform):
used_value = transform(value)
elif isinstance(transform, str):
used_value = transform.format(value=value)
else:
used_value = transform
else:
used_value = value
if used_value is not None:
if(accept_arguments(slot, 1)):
result = slot(used_value)
elif(accept_arguments(slot, 0)):
result = slot()
else:
result = ''
else:
result = slot()
if gather:
results.append(result)
return results
def connect(self, signal, slot, transform=None, condition=None):
"""Defines a connection between this objects signal and another objects slot
signal: the signal this class will emit, to cause the slot method to be called
receiver: the object containing the slot method to be called
slot: the slot method to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot if the value emitted matches the required value or calling required returns True
"""
if not signal in self.signals:
print("WARNING: {0} is trying to connect a slot to an undefined signal: {1}".format(self.__class__.__name__,
str(signal)))
return
if not hasattr(self, 'connections'):
self.connections = {}
connection = self.connections.setdefault(signal, {})
connection = connection.setdefault(condition, {})
connection[slot] = transform
def disconnect(self, signal=None, slot=None, transform=None, condition=None):
"""Removes connection(s) between this objects signal and connected slot(s)
signal: the signal this class will emit, to cause the slot method to be called
receiver: the object containing the slot method to be called
slot: the slot method or function to call
transform: an optional value override to pass into the slot method as the first variable
condition: only call the slot method if the value emitted matches this condition
"""
if slot:
self.connections[signal][condition].pop(slot, None)
elif condition is not None:
self.connections[signal].pop(condition, None)
elif signal:
self.connections.pop(signal, None)
else:
delattr(self, 'connections')
def accept_arguments(method, number_of_arguments=1):
"""Returns True if the given method will accept the given number of arguments
method: the method to perform introspection on
number_of_arguments: the number_of_arguments
"""
if 'method' in method.__class__.__name__:
number_of_arguments += 1
func = getattr(method, 'im_func', getattr(method, '__func__'))
func_defaults = getattr(func, 'func_defaults', getattr(func, '__defaults__'))
number_of_defaults = func_defaults and len(func_defaults) or 0
elif method.__class__.__name__ == 'function':
func_defaults = getattr(method, 'func_defaults', getattr(method, '__defaults__'))
number_of_defaults = func_defaults and len(func_defaults) or 0
coArgCount = getattr(method, 'func_code', getattr(method, '__code__')).co_argcount
if(coArgCount >= number_of_arguments and coArgCount - number_of_defaults <= number_of_arguments):
return True
return False
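# Illustrative usage (not part of the original file): the class, signal and
# handler names below are hypothetical.
#
# class Button(Connectable):
#     signals = ('clicked', )
#
# def on_click(value):
#     print('clicked with', value)
#
# button = Button()
# button.connect('clicked', on_click)
# button.emit('clicked', 'left')   # calls on_click('left')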
| mit |
tchernomax/ansible | test/units/modules/network/f5/test_bigip_cli_alias.py | 10 | 3822 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import Mock
from ansible.compat.tests.mock import patch
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_cli_alias import ApiParameters
from library.modules.bigip_cli_alias import ModuleParameters
from library.modules.bigip_cli_alias import ModuleManager
from library.modules.bigip_cli_alias import ArgumentSpec
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
from test.unit.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_cli_alias import ApiParameters
from ansible.modules.network.f5.bigip_cli_alias import ModuleParameters
from ansible.modules.network.f5.bigip_cli_alias import ModuleManager
from ansible.modules.network.f5.bigip_cli_alias import ArgumentSpec
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
command='run /util bash',
description='another description',
scope='shared',
)
p = ModuleParameters(params=args)
assert p.command == 'run /util bash'
assert p.description == 'another description'
assert p.scope == 'shared'
def test_api_parameters(self):
args = load_fixture('load_tm_cli_alias_1.json')
p = ApiParameters(params=args)
assert p.command == 'run /util bash'
assert p.description == 'Run the bash shell'
@patch('ansible.module_utils.f5_utils.AnsibleF5Client._get_mgmt_root',
return_value=True)
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
def test_create_default_device_group(self, *args):
set_module_args(
dict(
name="foo-group",
command='run /util bash',
description='another description',
scope='shared',
state="present",
server='localhost',
user='admin',
password='password'
)
)
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
mm = ModuleManager(module=module)
# Override methods to force specific logic in the module to happen
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
results = mm.exec_module()
assert results['changed'] is True
| gpl-3.0 |
dmnfarrell/peat | PEATDB/plugins/Summary.py | 1 | 3157 | #!/usr/bin/env python
#
# Protein Engineering Analysis Tool DataBase (PEATDB)
# Copyright (C) 2010 Damien Farrell & Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com
# Normal mail:
# Jens Nielsen
# SBBS, Conway Institute
# University College Dublin
# Dublin 4, Ireland
#
try:
from Plugins import Plugin
except:
from PEATDB.Plugins import Plugin
import os, types, copy, pickle
from Tkinter import *
import tkFileDialog
import Pmw
import PEATSA.WebApp.Data
import PEATSA.WebApp.UtilityFunctions
import PEATSA.Core as Core
from PEATDB.Dialogs import MultipleValDialog
from PEATDB.Actions import DBActions
from PEATDB.TableModels import TableModel
from PEATDB.Tables import TableCanvas
import tkMessageBox, tkSimpleDialog
class SummaryPlugin(Plugin):
"""Plugin for PEAT that enables several projects to be summarised together"""
capabilities = ['gui']
requires = ['PEATSA']
menuentry = 'Summary Plugin'
gui_methods = {'help':'Help',
'quit':'Close Window'}
buttonorder = ['createJobDialog','fetchJob','editConfigFile','help','quit']
    about = 'This plugin allows several projects to be summarised together'
projects = {}
def main(self, parent=None, DB=None):
if parent==None:
if DB!=None:
self.DB = DB
else:
return
else:
self.parent = parent
self.DB = parent.DB
self.parentframe = None
self._doFrame()
return
def _doFrame(self):
self.mainwin = self.parent.createChildFrame(width=460,title='PEATSA Plugin')
methods = self._getmethods()
methods = [m for m in methods if m[0] in self.gui_methods.keys()]
l=Label(self.mainwin, text='PEATSA Interface')
l.pack(side=TOP,fill=BOTH)
self.log = self.createLogWin(self.mainwin)
self.log.pack(side=TOP,fill=BOTH,expand=1)
self.stdout2Log()
return
def add(self, db):
"""Add db to projects"""
name = db.meta.info['project']
self.projects[name] = db
def setup(self, fields):
return
def populate(self):
"""Clear the current data and populate with db data
from dbs in projects dictionary"""
print self.DB
if self.DB == None:
return
        for db in self.projects.values():
recs = db.getRecs()
| mit |
dtamayo/rebound | rebound/tests/test_mercurius.py | 1 | 7209 | import rebound
import unittest
import os
import rebound.data as data
from datetime import datetime
class TestMercurius(unittest.TestCase):
def test_outer_solar(self):
sim = rebound.Simulation()
rebound.data.add_outer_solar_system(sim)
sim.integrator = "mercurius"
P = sim.particles[1].P
sim.dt = 1e-3*P
E0 = sim.calculate_energy()
sim.integrate(1000)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,2e-10)
def test_outer_solar_massive(self):
sim = rebound.Simulation()
rebound.data.add_outer_solar_system(sim)
for i in range(1,sim.N):
sim.particles[i].m *=50.
sim.integrator = "mercurius"
P = sim.particles[1].P
sim.dt = 1e-3*P
E0 = sim.calculate_energy()
sim.integrate(1000)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,7e-8)
def test_simple_collision(self):
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-5,r=1.6e-4,a=0.5,e=0.1) #these params lead to collision on my machine
sim.add(m=1e-8,r=4e-5,a=0.55,e=0.4,f=-0.94)
mtot0= sum([p.m for p in sim.particles])
com0 = sim.calculate_com()
N0 = sim.N
sim.integrator = "mercurius"
sim.dt = 0.01
sim.track_energy_offset = 1;
sim.collision = "direct"
sim.collision_resolve = "merge"
E0 = sim.calculate_energy()
sim.integrate(1)
com1 = sim.calculate_com()
self.assertAlmostEqual(com1.vx,com0.vx,delta=1e-16)
self.assertAlmostEqual(com1.vy,com0.vy,delta=1e-16)
self.assertAlmostEqual(com1.vz,com0.vz,delta=1e-16)
mtot1= sum([p.m for p in sim.particles])
self.assertEqual(mtot0,mtot1)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,3e-9)
self.assertEqual(N0-1,sim.N)
def test_planetesimal_collision(self):
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-5,r=1.6e-4,a=0.5,e=0.1) #these params lead to collision on my machine
sim.N_active = 2
sim.add(m=1e-8,r=4e-5,a=0.55,e=0.4,f=-0.94)
mtot0= sum([p.m for p in sim.particles])
N0 = sim.N
sim.integrator = "mercurius"
sim.dt = 0.01
sim.testparticle_type = 1
sim.track_energy_offset = 1;
sim.collision = "direct"
sim.collision_resolve = "merge"
E0 = sim.calculate_energy()
sim.integrate(1)
mtot1= sum([p.m for p in sim.particles])
self.assertEqual(mtot0,mtot1)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,3e-9)
self.assertEqual(N0-1,sim.N)
def test_massive_ejection(self):
sim = rebound.Simulation()
sim.add(m=1.)
sim.add(m=1e-4,r=1.6e-4,a=0.5,e=0.1)
sim.add(m=1e-6,r=4e-5,a=0.6)
sim.particles[2].vy *= 2
sim.N_active = 2
sim.integrator = "mercurius"
sim.dt = 0.01
sim.testparticle_type = 1
sim.collision = "direct"
sim.collision_resolve = "merge"
sim.track_energy_offset = 1;
sim.boundary = "open"
boxsize = 3.
sim.configure_box(boxsize)
E0 = sim.calculate_energy()
sim.integrate(1)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,4e-6)
def test_collision_with_star_simple(self):
sim = rebound.Simulation()
sim.add(m=1.,r=1.)
sim.add(m=1e-3,r=1.e-3,a=0.5)
mtot0 = sum([p.m for p in sim.particles])
com0 = sim.calculate_com()
N0 = sim.N
sim.integrator = "mercurius"
sim.dt = 0.01
sim.track_energy_offset = 1;
sim.collision = "direct"
sim.collision_resolve = "merge"
E0 = sim.calculate_energy()
sim.integrate(1)
com1 = sim.calculate_com()
self.assertAlmostEqual(com1.vx,com0.vx,delta=1e-16)
self.assertAlmostEqual(com1.vy,com0.vy,delta=1e-16)
self.assertAlmostEqual(com1.vz,com0.vz,delta=1e-16)
mtot1 = sum([p.m for p in sim.particles])
self.assertEqual(mtot0,mtot1)
self.assertEqual(N0-1,sim.N)
dE = abs((sim.calculate_energy() - E0)/E0)
self.assertLess(dE,1e-16)
def test_collision_with_star(self):
sim = rebound.Simulation()
sim.add(m=1.,r=0.00465)
sim.add(m=1e-5,r=1.6e-4,a=0.5,e=0.1,f=2.3)
sim.add(m=1e-4,r=1.4e-3,x=1.,vx=-0.4) # falling onto the star
sim.add(m=1e-5,r=1.6e-4,a=1.5,e=0.1)
mtot0 = sum([p.m for p in sim.particles])
com0 = sim.calculate_com()
N0 = sim.N
sim.integrator = "mercurius"
sim.dt = 0.01
sim.track_energy_offset = 1;
sim.collision = "direct"
sim.collision_resolve = "merge"
E0 = sim.calculate_energy()
sim.integrate(1)
com1 = sim.calculate_com()
self.assertAlmostEqual(com1.vx,com0.vx,delta=1e-16)
self.assertAlmostEqual(com1.vy,com0.vy,delta=1e-16)
self.assertAlmostEqual(com1.vz,com0.vz,delta=1e-16)
mtot1 = sum([p.m for p in sim.particles])
self.assertEqual(mtot0,mtot1)
self.assertEqual(N0-1,sim.N)
dE = abs((sim.calculate_energy() - E0)/E0)
# bad energy conservation due to democratic heliocentric!
self.assertLess(dE,3e-2)
def test_many_encounters(self):
def get_sim():
sim = rebound.Simulation()
sim.add(m=1)
for i in range(6):
sim.add(a=1+0.2*i,e=0.1+0.1*i,f=80.*i,omega=30.*i*i,m=0.0001)
sim.move_to_com()
sim.dt = 0.034
return sim
sim = get_sim()
sim.integrator = "mercurius"
E0 = sim.calculate_energy()
start=datetime.now()
sim.integrate(2000)
time_mercurius = (datetime.now()-start).total_seconds()
dE_mercurius = abs((sim.calculate_energy() - E0)/E0)
sim = get_sim()
sim.integrator = "ias15"
start=datetime.now()
sim.integrate(2000)
time_ias15 = (datetime.now()-start).total_seconds()
dE_ias15 = abs((sim.calculate_energy() - E0)/E0)
sim = get_sim()
sim.integrator = "whfast"
start=datetime.now()
sim.integrate(2000)
time_whfast = (datetime.now()-start).total_seconds()
dE_whfast = abs((sim.calculate_energy() - E0)/E0)
# Note: precision might vary on machine as initializations use cos/sin
# and are therefore machine dependent.
self.assertLess(dE_mercurius,2e-6) # reasonable precision for mercurius
self.assertLess(dE_mercurius/dE_whfast,5e-5) # at least 1e4 times better than whfast
is_travis = 'TRAVIS' in os.environ
if not is_travis: # timing not reliable on TRAVIS
self.assertLess(2.*time_mercurius,time_ias15) # at least 2 times faster than ias15
if __name__ == "__main__":
unittest.main()
| gpl-3.0 |
rsmitty/docker | vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py | 1232 | 3478 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
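# Example invocations, matching the argument handling in doMain() below
# (the directory and port values are placeholders):
#   python msgpack_test.py testdata /tmp/golden
#   python msgpack_test.py rpc-server 6789 10
#   python msgpack_test.py rpc-client-go-service 6789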
import msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
# get list with all primitive types, and a combo type
l0 = [
-8,
-1616,
-32323232,
-6464646464646464,
192,
1616,
32323232,
6464646464646464,
192,
-3232.0,
-6464646464.0,
3232.0,
6464646464.0,
False,
True,
None,
"someday",
"",
"bytestring",
1328176922000002000,
-2206187877999998000,
0,
-6795364578871345152
]
l1 = [
{ "true": True,
"false": False },
{ "true": "True",
"false": False,
"uint16(1616)": 1616 },
{ "list": [1616, 32323232, True, -3232.0, {"TRUE":True, "FALSE":False}, [True, False] ],
"int32":32323232, "bool": True,
"LONG STRING": "123456789012345678901234567890123456789012345678901234567890",
"SHORT STRING": "1234567890" },
{ True: "true", 8: False, "false": 0 }
]
l = []
l.extend(l0)
l.append(l0)
l.extend(l1)
return l
def build_test_data(destdir):
l = get_test_data_list()
for i in range(len(l)):
packer = msgpack.Packer()
serialized = packer.pack(l[i])
f = open(os.path.join(destdir, str(i) + '.golden'), 'wb')
f.write(serialized)
f.close()
def doRpcServer(port, stopTimeSec):
class EchoHandler(object):
def Echo123(self, msg1, msg2, msg3):
return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
def EchoStruct(self, msg):
return ("%s" % msg)
addr = msgpackrpc.Address('localhost', port)
server = msgpackrpc.Server(EchoHandler())
server.listen(addr)
# run thread to stop it after stopTimeSec seconds if > 0
if stopTimeSec > 0:
def myStopRpcServer():
server.stop()
t = threading.Timer(stopTimeSec, myStopRpcServer)
t.start()
server.start()
def doRpcClientToPythonSvc(port):
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("Echo123", "A1", "B2", "C3")
print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
# print ">>>> port: ", port, " <<<<<"
address = msgpackrpc.Address('localhost', port)
client = msgpackrpc.Client(address, unpack_encoding='utf-8')
print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
if len(args) == 2 and args[0] == "testdata":
build_test_data(args[1])
elif len(args) == 3 and args[0] == "rpc-server":
doRpcServer(int(args[1]), int(args[2]))
elif len(args) == 2 and args[0] == "rpc-client-python-service":
doRpcClientToPythonSvc(int(args[1]))
elif len(args) == 2 and args[0] == "rpc-client-go-service":
doRpcClientToGoSvc(int(args[1]))
else:
print("Usage: msgpack_test.py " +
"[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
if __name__ == "__main__":
doMain(sys.argv[1:])
| apache-2.0 |
s05427226/linux | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <[email protected]>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)
NSECS_PER_SEC = 1000000000
def avg(total, n):
return total / n
def nsecs(secs, nsecs):
return secs * NSECS_PER_SEC + nsecs
def nsecs_secs(nsecs):
return nsecs / NSECS_PER_SEC
def nsecs_nsecs(nsecs):
return nsecs % NSECS_PER_SEC
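# Worked example: nsecs(1, 500000000) == 1500000000, nsecs_secs(1500000000) == 1
# and nsecs_nsecs(1500000000) == 500000000.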
def nsecs_str(nsecs):
str = "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs)),
return str
def add_stats(dict, key, value):
if not dict.has_key(key):
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
if value < min:
min = value
if value > max:
max = value
avg = (avg + value) / 2
dict[key] = (min, max, avg, count + 1)
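# Example: after add_stats(d, 'lat', 10) and add_stats(d, 'lat', 30),
# d['lat'] == (10, 30, 20, 2) -- i.e. (min, max, running avg, count).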
def clear_term():
print("\x1b[H\x1b[2J")
audit_package_warned = False
try:
import audit
machine_to_id = {
'x86_64': audit.MACH_86_64,
'alpha' : audit.MACH_ALPHA,
'ia64' : audit.MACH_IA64,
'ppc' : audit.MACH_PPC,
'ppc64' : audit.MACH_PPC64,
's390' : audit.MACH_S390,
's390x' : audit.MACH_S390X,
'i386' : audit.MACH_X86,
'i586' : audit.MACH_X86,
'i686' : audit.MACH_X86,
}
try:
machine_to_id['armeb'] = audit.MACH_ARMEB
except:
pass
machine_id = machine_to_id[os.uname()[4]]
except:
if not audit_package_warned:
audit_package_warned = True
print "Install the audit-libs-python package to get syscall names"
def syscall_name(id):
try:
return audit.audit_syscall_to_name(id, machine_id)
except:
return str(id)
def strerror(nr):
try:
return errno.errorcode[abs(nr)]
except:
return "Unknown %d errno" % nr
| gpl-2.0 |
theheros/kbengine | kbe/res/scripts/common/Lib/test/test_pep277.py | 3 | 7857 | # Test the Unicode versions of normal file functions
# open, os.open, os.stat, os.listdir, os.rename, os.remove, os.mkdir, os.chdir, os.rmdir
import sys, os, unittest
from unicodedata import normalize
from test import support
filenames = [
'1_abc',
'2_ascii',
'3_Gr\xfc\xdf-Gott',
'4_\u0393\u03b5\u03b9\u03ac-\u03c3\u03b1\u03c2',
'5_\u0417\u0434\u0440\u0430\u0432\u0441\u0442\u0432\u0443\u0439\u0442\u0435',
'6_\u306b\u307d\u3093',
'7_\u05d4\u05e9\u05e7\u05e6\u05e5\u05e1',
'8_\u66e8\u66e9\u66eb',
'9_\u66e8\u05e9\u3093\u0434\u0393\xdf',
    # Specific code points: fn, NFC(fn) and NFKC(fn) all different
'10_\u1fee\u1ffd',
]
# Mac OS X decomposes Unicode names, using Normal Form D.
# http://developer.apple.com/mac/library/qa/qa2001/qa1173.html
# "However, most volume formats do not follow the exact specification for
# these normal forms. For example, HFS Plus uses a variant of Normal Form D
# in which U+2000 through U+2FFF, U+F900 through U+FAFF, and U+2F800 through
# U+2FAFF are not decomposed."
if sys.platform != 'darwin':
filenames.extend([
        # Specific code points: NFC(fn), NFD(fn), NFKC(fn) and NFKD(fn) all different
'11_\u0385\u03d3\u03d4',
'12_\u00a8\u0301\u03d2\u0301\u03d2\u0308', # == NFD('\u0385\u03d3\u03d4')
'13_\u0020\u0308\u0301\u038e\u03ab', # == NFKC('\u0385\u03d3\u03d4')
'14_\u1e9b\u1fc1\u1fcd\u1fce\u1fcf\u1fdd\u1fde\u1fdf\u1fed',
        # Specific code points: fn, NFC(fn) and NFKC(fn) all different
'15_\u1fee\u1ffd\ufad1',
'16_\u2000\u2000\u2000A',
'17_\u2001\u2001\u2001A',
'18_\u2003\u2003\u2003A', # == NFC('\u2001\u2001\u2001A')
'19_\u0020\u0020\u0020A', # '\u0020' == ' ' == NFKC('\u2000') ==
# NFKC('\u2001') == NFKC('\u2003')
])
# Is it Unicode-friendly?
if not os.path.supports_unicode_filenames:
fsencoding = sys.getfilesystemencoding()
try:
for name in filenames:
name.encode(fsencoding)
except UnicodeEncodeError:
raise unittest.SkipTest("only NT+ and systems with "
"Unicode-friendly filesystem encoding")
# Destroy directory dirname and all files under it, to one level.
def deltree(dirname):
# Don't hide legitimate errors: if one of these suckers exists, it's
# an error if we can't remove it.
if os.path.exists(dirname):
# must pass unicode to os.listdir() so we get back unicode results.
for fname in os.listdir(str(dirname)):
os.unlink(os.path.join(dirname, fname))
os.rmdir(dirname)
class UnicodeFileTests(unittest.TestCase):
files = set(filenames)
normal_form = None
def setUp(self):
try:
os.mkdir(support.TESTFN)
except OSError:
pass
files = set()
for name in self.files:
name = os.path.join(support.TESTFN, self.norm(name))
with open(name, 'wb') as f:
f.write((name+'\n').encode("utf-8"))
os.stat(name)
files.add(name)
self.files = files
def tearDown(self):
deltree(support.TESTFN)
def norm(self, s):
if self.normal_form:
return normalize(self.normal_form, s)
return s
def _apply_failure(self, fn, filename, expected_exception,
check_fn_in_exception = True):
with self.assertRaises(expected_exception) as c:
fn(filename)
exc_filename = c.exception.filename
# the "filename" exception attribute may be encoded
if isinstance(exc_filename, bytes):
filename = filename.encode(sys.getfilesystemencoding())
if check_fn_in_exception:
self.assertEqual(exc_filename, filename, "Function '%s(%a) failed "
"with bad filename in the exception: %a" %
(fn.__name__, filename, exc_filename))
def test_failures(self):
# Pass non-existing Unicode filenames all over the place.
for name in self.files:
name = "not_" + name
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
def test_open(self):
for name in self.files:
f = open(name, 'wb')
f.write((name+'\n').encode("utf-8"))
f.close()
os.stat(name)
    # Skip the test on darwin, because darwin normalizes the filename to NFD
    # (a variant of the Unicode NFD form). Normalizing the filename to NFC,
    # NFKC or NFKD in Python is useless, because darwin will normalize it later
    # and so open(), os.stat(), etc. don't raise any exception.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_normalize(self):
files = set(self.files)
others = set()
for nf in set(['NFC', 'NFD', 'NFKC', 'NFKD']):
others |= set(normalize(nf, file) for file in files)
others -= files
for name in others:
self._apply_failure(open, name, IOError)
self._apply_failure(os.stat, name, OSError)
self._apply_failure(os.chdir, name, OSError)
self._apply_failure(os.rmdir, name, OSError)
self._apply_failure(os.remove, name, OSError)
            # listdir may append a wildcard to the filename, so don't check
self._apply_failure(os.listdir, name, OSError, False)
# Skip the test on darwin, because darwin uses a normalization different
# than Python NFD normalization: filenames are different even if we use
# Python NFD normalization.
    @unittest.skipIf(sys.platform == 'darwin', 'irrelevant test on Mac OS X')
def test_listdir(self):
sf0 = set(self.files)
f1 = os.listdir(support.TESTFN.encode(sys.getfilesystemencoding()))
f2 = os.listdir(support.TESTFN)
sf2 = set(os.path.join(support.TESTFN, f) for f in f2)
self.assertEqual(sf0, sf2, "%a != %a" % (sf0, sf2))
self.assertEqual(len(f1), len(f2))
def test_rename(self):
for name in self.files:
os.rename(name, "tmp")
os.rename("tmp", name)
def test_directory(self):
dirname = os.path.join(support.TESTFN, 'Gr\xfc\xdf-\u66e8\u66e9\u66eb')
filename = '\xdf-\u66e8\u66e9\u66eb'
oldwd = os.getcwd()
os.mkdir(dirname)
os.chdir(dirname)
try:
with open(filename, 'wb') as f:
f.write((filename + '\n').encode("utf-8"))
os.access(filename,os.R_OK)
os.remove(filename)
finally:
os.chdir(oldwd)
os.rmdir(dirname)
class UnicodeNFCFileTests(UnicodeFileTests):
normal_form = 'NFC'
class UnicodeNFDFileTests(UnicodeFileTests):
normal_form = 'NFD'
class UnicodeNFKCFileTests(UnicodeFileTests):
normal_form = 'NFKC'
class UnicodeNFKDFileTests(UnicodeFileTests):
normal_form = 'NFKD'
def test_main():
try:
support.run_unittest(
UnicodeFileTests,
UnicodeNFCFileTests,
UnicodeNFDFileTests,
UnicodeNFKCFileTests,
UnicodeNFKDFileTests,
)
finally:
deltree(support.TESTFN)
if __name__ == "__main__":
test_main()
| lgpl-3.0 |
scavarda/mysql-dbcompare | mysql-utilities-1.6.0/scripts/mysqlmetagrep.py | 3 | 5356 | #!/usr/bin/python
#
# Copyright (c) 2010, 2014, Oracle and/or its affiliates. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; version 2 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
"""
This file contains the metagrep utility which allows users to search metadata.
"""
from mysql.utilities.common.tools import check_python_version
# Check Python version compatibility
check_python_version()
import os.path
import re
import sys
from mysql.utilities.command.grep import ObjectGrep, OBJECT_TYPES
from mysql.utilities.common.tools import check_connector_python
from mysql.utilities.common.options import (add_regexp, setup_common_options,
add_format_option,
add_character_set_option,
get_ssl_dict,
check_password_security)
from mysql.utilities.exception import UtilError
# Check for connector/python
if not check_connector_python():
sys.exit(1)
if __name__ == '__main__':
# Setup the command parser and setup server, help
parser = setup_common_options(os.path.basename(sys.argv[0]),
"mysqlmetagrep - search metadata",
"%prog --server=user:pass@host:port:socket "
"[options] pattern", True)
# Add character set option
add_character_set_option(parser)
# Setup utility-specific options:
parser.add_option("-b", "--body",
dest="check_body", action="store_true", default=False,
help="search the body of routines, triggers, and "
"events as well")
def quote(string):
"""Quote a string
"""
return "'" + string + "'"
# Add some more advanced parsing here to handle types better.
parser.add_option(
'--search-objects', '--object-types',
dest="object_types", default=','.join(OBJECT_TYPES),
help=("the object type to search in: a comma-separated list of one or "
"more of: {0}".format(', '.join([quote(obj_type)
for obj_type in OBJECT_TYPES])))
)
# Add regexp
add_regexp(parser)
parser.add_option(
"-p", "--print-sql", "--sql",
dest="print_sql", action="store_true", default=False,
help="print the statement instead of sending it to the server")
parser.add_option(
"-e", "--pattern",
dest="pattern",
help="pattern to use when matching. Required if the pattern looks "
"like a connection specification.")
parser.add_option(
"--database",
dest="database_pattern", default=None,
help="only look at objects in databases matching this pattern")
# Output format
add_format_option(parser, "display the output in either grid (default), "
"tab, csv, or vertical format", "grid")
options, args = parser.parse_args()
# Check security settings
check_password_security(options, args)
_LOOKS_LIKE_CONNECTION_MSG = """Pattern '{pattern}' looks like a
connection specification. Use --pattern if this is really what you
want"""
_AT_LEAST_ONE_SERVER_MSG = """You need at least one server if you're
not using the --sql option"""
_NO_SERVERS_ALLOWED_MSG = """You should not include servers in the
call if you are using the --sql option"""
# A --pattern is required.
if not options.pattern:
parser.error("No pattern supplied.")
# Check that --sql option is not used with --server, and --server are
# supplied if --sql is not used.
if options.print_sql:
if options.server is not None and len(options.server) > 0:
parser.error(_NO_SERVERS_ALLOWED_MSG)
else:
if options.server is None or len(options.server) == 0:
parser.error(_AT_LEAST_ONE_SERVER_MSG)
object_types = re.split(r"\s*,\s*", options.object_types)
try:
command = ObjectGrep(options.pattern, options.database_pattern,
object_types, options.check_body,
options.use_regexp)
if options.print_sql:
print(command.sql())
else:
ssl_opts = get_ssl_dict(options)
command.execute(options.server, format=options.format,
charset=options.charset, ssl_opts=ssl_opts)
except UtilError:
_, err, _ = sys.exc_info()
sys.stderr.write("ERROR: {0}\n".format(err.errmsg))
sys.exit(1)
except:
_, details, _ = sys.exc_info()
sys.stderr.write("ERROR: {0}\n".format(details))
sys.exit(1)
sys.exit()
| apache-2.0 |
yunhaowang/IDP-APA | utilities/py_idpapa_stat.py | 1 | 3544 | #!/usr/bin/env python
import sys,re,time,argparse
def main(args):
# print >>sys.stdout, "Start analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
stat_list,known_sgt_na,known_sgt_pa,known_sgt_apa,known_mlt_na,known_mlt_pa,known_mlt_apa,novel_sgt_na,novel_sgt_pa,novel_sgt_apa,novel_mlt_na,novel_mlt_pa ,novel_mlt_apa = stat_results(args.input)
# print stat_list
print >>args.output, "Sample\APA type\tNA_no_polyA\tPA_with_one_polyA\tAPA_with_multiple_polyA\tTotal isoform number"
print >>args.output, "%s\t%s\t%s\t%s\t%s" % ("Known singleton isoform",str(known_sgt_na),str(known_sgt_pa),str(known_sgt_apa),str(known_sgt_na+known_sgt_pa+known_sgt_apa))
print >>args.output, "%s\t%s\t%s\t%s\t%s" % ("Known multi-exon isoform", str(known_mlt_na),str(known_mlt_pa),str(known_mlt_apa),str(known_mlt_na+known_mlt_pa+known_mlt_apa))
print >>args.output, "%s\t%s\t%s\t%s\t%s" % ("Novel singleton isoform", str(novel_sgt_na),str(novel_sgt_pa),str(novel_sgt_apa),str(novel_sgt_na+novel_sgt_pa+novel_sgt_apa))
print >>args.output, "%s\t%s\t%s\t%s\t%s" % ("Novel multi-exon isoform",str(novel_mlt_na),str(novel_mlt_pa),str(novel_mlt_apa),str(novel_mlt_na+novel_mlt_pa+novel_mlt_apa))
print >>args.output, "%s\t%s\t%s\t%s\t%s" % ("Total isoform number",str(known_sgt_na+known_mlt_na+novel_sgt_na+novel_mlt_na),str(known_sgt_pa+known_mlt_pa+novel_sgt_pa+novel_mlt_pa),str(known_sgt_apa+known_mlt_apa+novel_sgt_apa+novel_mlt_apa),str(sum(stat_list)))
# print >>sys.stdout, "Finish analysis: " + time.strftime("%a,%d %b %Y %H:%M:%S")
def stat_results(input_gpd):
head = 1
known_sgt_na = 0
known_sgt_pa = 0
known_sgt_apa = 0
known_mlt_na = 0
known_mlt_pa = 0
known_mlt_apa = 0
novel_sgt_na = 0
novel_sgt_pa = 0
novel_sgt_apa = 0
novel_mlt_na = 0
novel_mlt_pa = 0
novel_mlt_apa = 0
for line in input_gpd:
if head:
head -= 1
continue
gene_id,iso_id,chr,strand,tss,tts,cds_s,cds_e,exon_number,exon_start,exon_end,lr_pa_set,sr_pa_set,pa_set,pa_type = line.rstrip("\n").split("\t")
if int(exon_number) == 1:
if "novel_sgt_iso_" in iso_id:
if pa_type == "NA":
novel_sgt_na += 1
elif pa_type == "PA":
novel_sgt_pa += 1
else:
novel_sgt_apa += 1
else:
if pa_type == "NA":
known_sgt_na += 1
elif pa_type == "PA":
known_sgt_pa += 1
else:
known_sgt_apa += 1
else:
if "novel_mlt_iso_" in iso_id:
if pa_type == "NA":
novel_mlt_na += 1
elif pa_type == "PA":
novel_mlt_pa += 1
else:
novel_mlt_apa += 1
else:
if pa_type == "NA":
known_mlt_na += 1
elif pa_type == "PA":
known_mlt_pa += 1
else:
known_mlt_apa += 1
stat_list = [known_sgt_na,known_sgt_pa,known_sgt_apa,known_mlt_na,known_mlt_pa,known_mlt_apa,novel_sgt_na,novel_sgt_pa,novel_sgt_apa,novel_mlt_na,novel_mlt_pa,novel_mlt_apa]
return stat_list,known_sgt_na,known_sgt_pa,known_sgt_apa,known_mlt_na,known_mlt_pa,known_mlt_apa,novel_sgt_na,novel_sgt_pa,novel_sgt_apa,novel_mlt_na,novel_mlt_pa,novel_mlt_apa
input_gpd.close()
def do_inputs():
parser = argparse.ArgumentParser(description="Function: stat constructed isoforms and identified polyA sites",formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i','--input',type=argparse.FileType('r'),required=True,help="Input: optimized isoforms with polyA site, gpd file")
parser.add_argument('-o','--output',type=argparse.FileType('w'),required=True,help="Output: final output file")
args = parser.parse_args()
return args
if __name__=="__main__":
args = do_inputs()
main(args)
| apache-2.0 |
runekaagaard/django-poeditor-com-field | poeditor_com_field/core.py | 1 | 6308 | from collections import namedtuple
from copy import deepcopy
import json
from celery.task.base import task
from celery.task.base import periodic_task
from celery.schedules import crontab
import requests
from django.conf import settings
from django.db.transaction import atomic
from django.core.validators import EMPTY_VALUES
from django.db.models.loading import get_models
import logging
logger = logging.getLogger(__name__)
from .models import Link
Term = namedtuple('Term', 'field_name,value,reference')
def post_init_signal(sender, instance=None, **kwargs):
instance._poeditor_com_field_cache = deepcopy(instance)
def post_save_signal(sender, created=None, instance=None, **kwargs):
update_terms(instance, created)
def pre_delete_signal(sender, instance=None, **kwargs):
remove_terms(instance)
def link_add(term):
    Link.objects.select_for_update().filter(term=term.value)
try:
link = Link.objects.get(term=term.value)
if term.reference in link.references:
return None
else:
link.count += 1
link.references += term.reference
link.save()
return link
except Link.DoesNotExist:
return Link.objects.create(
term=term.value, count=1, references=term.reference)
def link_subtract(term):
    Link.objects.select_for_update().filter(term=term.value)
try:
link = Link.objects.get(
term=term.value, )
link.count -= 1
        link.references = link.references.replace(term.reference, '')
link.save()
return link
except Link.DoesNotExist:
return None
def changed_terms(instance, created):
deleted, added = [], []
for field_name in instance._poeditor_com_field_fields:
if not created:
before = make_term(instance._poeditor_com_field_cache, field_name)
after = make_term(instance, field_name)
if not created and before.value == after.value:
continue
if not created and before.value not in EMPTY_VALUES:
deleted.append(before)
if after.value not in EMPTY_VALUES:
added.append(after)
return deleted, added
def make_term(instance, field_name):
return Term(
field_name,
getattr(instance, field_name),
u'<{}.{} id={} field={} />'.format(instance._meta.app_label,
instance.__class__.__name__,
instance.pk, field_name), )
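# Illustrative example (the app, model and field names are hypothetical):
# for a blog.Post instance with pk=7, make_term(post, 'title') returns
# Term('title', post.title, u'<blog.Post id=7 field=title />').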
@atomic
def update_terms(instance, created):
deleted, added = changed_terms(instance, created)
modified_link_pks = set()
if added:
modified_link_pks |= set(
link_add(x).pk for x in added if x is not None)
if deleted:
modified_link_pks |= set(
link_subtract(x).pk for x in deleted if x is not None)
sync_links(list(modified_link_pks))
@atomic
def remove_terms(instance):
terms = [
make_term(instance, x) for x in instance._poeditor_com_field_fields
]
modified_link_pks = set(
link_subtract(x).pk for x in terms if x is not None)
sync_links(list(modified_link_pks))
def post(path, data):
r = requests.post(
'https://api.poeditor.com/v2/' + path,
data={
'id': settings.POEDITOR_PROJECT_ID,
'api_token': settings.POEDITOR_API_TOKEN,
'data': json.dumps(data).replace("\\r", ""),
}, )
try:
response = r.json()
if ('response' in response and 'status' in response['response'] and
response['response']['status'] == 'success'):
logger.info(u"Succes: path='{}'".format(path))
return True, response
else:
logger.error(
u"Error: path='{}', response='{}'".format(path, r.text))
return False, response
except ValueError:
logger.error(u"Error: path='{}', response='{}'".format(path, r.text))
return False, None
@task
def sync_links(link_pks=None):
links = Link.objects.filter(pk__in=link_pks)
if not links:
return
add = links.filter(count__gt=0, exists_on_server=False)
len(add) # Force evaluation.
update = links.filter(count__gt=0, exists_on_server=True)
len(update) # Force evaluation.
delete = links.filter(count__lt=1, exists_on_server=True)
len(delete) # Force evaluation.
if add:
with atomic():
add.select_for_update().update(in_sync_with_server=False)
status, _ = post('terms/add', [{
"term": x.term,
"context": settings.POEDITOR_CONTEXT,
"reference": x.references,
} for x in add])
if status:
add.update(exists_on_server=True, in_sync_with_server=True)
if update:
with atomic():
update.select_for_update().update(in_sync_with_server=False)
status, _ = post('terms/update', [{
"term": x.term,
"context": settings.POEDITOR_CONTEXT,
"reference": x.references,
} for x in update])
if status:
update.update(in_sync_with_server=True)
if delete:
with atomic():
delete.select_for_update().update(in_sync_with_server=False)
status, _ = post('terms/delete', [{
"term": x.term,
"context": settings.POEDITOR_CONTEXT,
} for x in delete])
if status:
delete.update(in_sync_with_server=True)
delete.delete()
def sync_existing_models():
for model in get_models():
try:
fields = model._poeditor_com_field_fields
except AttributeError:
continue
link_pks = []
for obj in model.objects.all():
for field in fields:
term = make_term(obj, field)
link = link_add(term)
if link is not None:
link_pks.append(link.pk)
sync_links(link_pks)
@periodic_task(run_every=crontab(minute=5))
def retry_sync_links():
logger.info("Cronjob: retry_sync_links called.")
sync_links(
Link.objects.filter(in_sync_with_server=False).values_list(
'pk', flat=True))
| mit |
jstammers/EDMSuite | NavPython/IronPython/Lib/sunau.py | 156 | 16537 | """Stuff to parse Sun and NeXT audio files.
An audio file consists of a header followed by the data. The structure
of the header is as follows.
+---------------+
| magic word |
+---------------+
| header size |
+---------------+
| data size |
+---------------+
| encoding |
+---------------+
| sample rate |
+---------------+
| # of channels |
+---------------+
| info |
| |
+---------------+
The magic word consists of the 4 characters '.snd'. Apart from the
info field, all header fields are 4 bytes in size. They are all
32-bit unsigned integers encoded in big-endian byte order.
The header size really gives the start of the data.
The data size is the physical size of the data. From the other
parameters the number of frames can be calculated.
The encoding gives the way in which audio samples are encoded.
Possible values are listed below.
The info field currently consists of an ASCII string giving a
human-readable description of the audio file. The info field is
padded with NUL bytes to the header size.
Usage.
Reading audio files:
f = sunau.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' or 'ULAW')
getcompname() -- returns human-readable version of
compression type ('not compressed' matches 'NONE')
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing audio files:
f = sunau.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
# from <multimedia/audio_filehdr.h>
AUDIO_FILE_MAGIC = 0x2e736e64
AUDIO_FILE_ENCODING_MULAW_8 = 1
AUDIO_FILE_ENCODING_LINEAR_8 = 2
AUDIO_FILE_ENCODING_LINEAR_16 = 3
AUDIO_FILE_ENCODING_LINEAR_24 = 4
AUDIO_FILE_ENCODING_LINEAR_32 = 5
AUDIO_FILE_ENCODING_FLOAT = 6
AUDIO_FILE_ENCODING_DOUBLE = 7
AUDIO_FILE_ENCODING_ADPCM_G721 = 23
AUDIO_FILE_ENCODING_ADPCM_G722 = 24
AUDIO_FILE_ENCODING_ADPCM_G723_3 = 25
AUDIO_FILE_ENCODING_ADPCM_G723_5 = 26
AUDIO_FILE_ENCODING_ALAW_8 = 27
# from <multimedia/audio_hdr.h>
AUDIO_UNKNOWN_SIZE = 0xFFFFFFFFL # ((unsigned)(~0))
_simple_encodings = [AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_LINEAR_8,
AUDIO_FILE_ENCODING_LINEAR_16,
AUDIO_FILE_ENCODING_LINEAR_24,
AUDIO_FILE_ENCODING_LINEAR_32,
AUDIO_FILE_ENCODING_ALAW_8]
class Error(Exception):
pass
def _read_u32(file):
x = 0L
for i in range(4):
byte = file.read(1)
if byte == '':
raise EOFError
x = x*256 + ord(byte)
return x
def _write_u32(file, x):
data = []
for i in range(4):
d, m = divmod(x, 256)
data.insert(0, m)
x = d
for i in range(4):
file.write(chr(int(data[i])))
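# For example, _write_u32(f, AUDIO_FILE_MAGIC) emits the four bytes
# '.', 's', 'n', 'd' (0x2e 0x73 0x6e 0x64) in big-endian order, which
# _read_u32() parses back into 0x2e736e64.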
class Au_read:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'rb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._soundpos = 0
magic = int(_read_u32(file))
if magic != AUDIO_FILE_MAGIC:
raise Error, 'bad magic number'
self._hdr_size = int(_read_u32(file))
if self._hdr_size < 24:
raise Error, 'header size too small'
if self._hdr_size > 100:
raise Error, 'header size ridiculously large'
self._data_size = _read_u32(file)
if self._data_size != AUDIO_UNKNOWN_SIZE:
self._data_size = int(self._data_size)
self._encoding = int(_read_u32(file))
if self._encoding not in _simple_encodings:
raise Error, 'encoding not (yet) supported'
if self._encoding in (AUDIO_FILE_ENCODING_MULAW_8,
AUDIO_FILE_ENCODING_ALAW_8):
self._sampwidth = 2
self._framesize = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_8:
self._framesize = self._sampwidth = 1
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_16:
self._framesize = self._sampwidth = 2
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_24:
self._framesize = self._sampwidth = 3
elif self._encoding == AUDIO_FILE_ENCODING_LINEAR_32:
self._framesize = self._sampwidth = 4
else:
raise Error, 'unknown encoding'
self._framerate = int(_read_u32(file))
self._nchannels = int(_read_u32(file))
self._framesize = self._framesize * self._nchannels
if self._hdr_size > 24:
self._info = file.read(self._hdr_size - 24)
for i in range(len(self._info)):
if self._info[i] == '\0':
self._info = self._info[:i]
break
else:
self._info = ''
def getfp(self):
return self._file
def getnchannels(self):
return self._nchannels
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getnframes(self):
if self._data_size == AUDIO_UNKNOWN_SIZE:
return AUDIO_UNKNOWN_SIZE
if self._encoding in _simple_encodings:
return self._data_size / self._framesize
return 0 # XXX--must do some arithmetic here
def getcomptype(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'ULAW'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'ALAW'
else:
return 'NONE'
def getcompname(self):
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
return 'CCITT G.711 u-law'
elif self._encoding == AUDIO_FILE_ENCODING_ALAW_8:
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def readframes(self, nframes):
if self._encoding in _simple_encodings:
if nframes == AUDIO_UNKNOWN_SIZE:
data = self._file.read()
else:
data = self._file.read(nframes * self._framesize * self._nchannels)
if self._encoding == AUDIO_FILE_ENCODING_MULAW_8:
import audioop
data = audioop.ulaw2lin(data, self._sampwidth)
return data
return None # XXX--not implemented yet
def rewind(self):
self._soundpos = 0
self._file.seek(self._hdr_size)
def tell(self):
return self._soundpos
def setpos(self, pos):
if pos < 0 or pos > self.getnframes():
raise Error, 'position not in range'
self._file.seek(pos * self._framesize + self._hdr_size)
self._soundpos = pos
def close(self):
self._file = None
class Au_write:
def __init__(self, f):
if type(f) == type(''):
import __builtin__
f = __builtin__.open(f, 'wb')
self.initfp(f)
def __del__(self):
if self._file:
self.close()
def initfp(self, file):
self._file = file
self._framerate = 0
self._nchannels = 0
self._sampwidth = 0
self._framesize = 0
self._nframes = AUDIO_UNKNOWN_SIZE
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._info = ''
self._comptype = 'ULAW' # default is U-law
def setnchannels(self, nchannels):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels not in (1, 2, 4):
raise Error, 'only 1, 2, or 4 channels supported'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth not in (1, 2, 4):
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
        if not self._sampwidth:
raise Error, 'sample width not specified'
return self._sampwidth
def setframerate(self, framerate):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._nframeswritten:
raise Error, 'cannot change parameters after starting to write'
if nframes < 0:
raise Error, '# of frames cannot be negative'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, type, name):
if type in ('NONE', 'ULAW'):
self._comptype = type
else:
raise Error, 'unknown compression type'
def getcomptype(self):
return self._comptype
def getcompname(self):
if self._comptype == 'ULAW':
return 'CCITT G.711 u-law'
elif self._comptype == 'ALAW':
return 'CCITT G.711 A-law'
else:
return 'not compressed'
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written()
nframes = len(data) / self._framesize
if self._comptype == 'ULAW':
import audioop
data = audioop.lin2ulaw(data, self._sampwidth)
self._file.write(data)
self._nframeswritten = self._nframeswritten + nframes
self._datawritten = self._datawritten + len(data)
def writeframes(self, data):
self.writeframesraw(data)
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
def close(self):
self._ensure_header_written()
if self._nframeswritten != self._nframes or \
self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
#
# private methods
#
def _ensure_header_written(self):
if not self._nframeswritten:
if not self._nchannels:
raise Error, '# of channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'frame rate not specified'
self._write_header()
def _write_header(self):
if self._comptype == 'NONE':
if self._sampwidth == 1:
encoding = AUDIO_FILE_ENCODING_LINEAR_8
self._framesize = 1
elif self._sampwidth == 2:
encoding = AUDIO_FILE_ENCODING_LINEAR_16
self._framesize = 2
elif self._sampwidth == 4:
encoding = AUDIO_FILE_ENCODING_LINEAR_32
self._framesize = 4
else:
raise Error, 'internal error'
elif self._comptype == 'ULAW':
encoding = AUDIO_FILE_ENCODING_MULAW_8
self._framesize = 1
else:
raise Error, 'internal error'
self._framesize = self._framesize * self._nchannels
_write_u32(self._file, AUDIO_FILE_MAGIC)
header_size = 25 + len(self._info)
header_size = (header_size + 7) & ~7
_write_u32(self._file, header_size)
if self._nframes == AUDIO_UNKNOWN_SIZE:
length = AUDIO_UNKNOWN_SIZE
else:
length = self._nframes * self._framesize
_write_u32(self._file, length)
self._datalength = length
_write_u32(self._file, encoding)
_write_u32(self._file, self._framerate)
_write_u32(self._file, self._nchannels)
self._file.write(self._info)
self._file.write('\0'*(header_size - len(self._info) - 24))
def _patchheader(self):
self._file.seek(8)
_write_u32(self._file, self._datawritten)
self._datalength = self._datawritten
self._file.seek(0, 2)
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Au_read(f)
elif mode in ('w', 'wb'):
return Au_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open
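# A minimal usage sketch of the reader/writer API documented above.  The file
# name 'demo.au' and the one-second silent signal are arbitrary illustrative
# choices, not part of the module's API.
if __name__ == '__main__':
    w = open('demo.au', 'w')              # module-level open(), returns Au_write
    w.setnchannels(1)
    w.setsampwidth(2)
    w.setframerate(8000)
    w.setcomptype('NONE', 'not compressed')
    w.writeframes('\x00\x00' * 8000)      # 8000 frames of 16-bit silence
    w.close()
    r = open('demo.au', 'r')              # returns Au_read
    print r.getparams()   # (nchannels, sampwidth, framerate, nframes, comptype, compname)
    r.close()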
| mit |
dednal/chromium.src | tools/telemetry/telemetry/core/platform/profiler/vtune_profiler.py | 26 | 5288 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import subprocess
import sys
import tempfile
from telemetry.core import exceptions
from telemetry.core.platform import profiler
from telemetry.core.platform.profiler import android_profiling_helper
class _SingleProcessVTuneProfiler(object):
"""An internal class for using vtune for a given process."""
def __init__(self, pid, output_file, browser_backend, platform_backend):
self._pid = pid
self._browser_backend = browser_backend
self._platform_backend = platform_backend
self._output_file = output_file
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
cmd = ['amplxe-cl', '-collect', 'hotspots',
'-target-pid', str(pid), '-r', self._output_file]
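    # For example, with pid 1234 and an output file 'out.vtune' (placeholder
    # values) this builds: amplxe-cl -collect hotspots -target-pid 1234 -r out.vtune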
self._is_android = platform_backend.GetOSName() == 'android'
if self._is_android:
cmd += ['-target-system', 'android']
self._proc = subprocess.Popen(
cmd, stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
def CollectProfile(self):
if 'renderer' in self._output_file:
try:
self._platform_backend.GetCommandLine(self._pid)
except exceptions.ProcessGoneException:
logging.warning('Renderer was swapped out during profiling. '
'To collect a full profile rerun with '
'"--extra-browser-args=--single-process"')
subprocess.call(['amplxe-cl', '-command', 'stop', '-r', self._output_file])
exit_code = self._proc.wait()
try:
# 1: amplxe: Error: Cannot find a running process with the specified ID.
# Provide a valid PID.
if exit_code not in (0, 1):
raise Exception(
'amplxe-cl failed with exit code %d. Output:\n%s' % (exit_code,
self._GetStdOut()))
finally:
self._tmp_output_file.close()
if exit_code:
# The renderer process was swapped out. Now that we made sure VTune has
# stopped, return without further processing the invalid profile.
return self._output_file
if self._is_android:
required_libs = \
android_profiling_helper.GetRequiredLibrariesForVTuneProfile(
self._output_file)
device = self._browser_backend.adb.device()
symfs_root = os.path.dirname(self._output_file)
android_profiling_helper.CreateSymFs(device,
symfs_root,
required_libs,
use_symlinks=True)
logging.info('Resolving symbols in profile.')
subprocess.call(['amplxe-cl', '-finalize', '-r', self._output_file,
'-search-dir', symfs_root])
print 'To view the profile, run:'
print ' amplxe-gui %s' % self._output_file
return self._output_file
def _GetStdOut(self):
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
class VTuneProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path, state):
super(VTuneProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
process_output_file_map = self._GetProcessOutputFileMap()
self._process_profilers = []
has_renderer = False
for pid, output_file in process_output_file_map.iteritems():
if 'renderer' in output_file:
has_renderer = True
break
for pid, output_file in process_output_file_map.iteritems():
if has_renderer:
if not 'renderer' in output_file:
continue
elif not 'browser0' in output_file:
continue
self._process_profilers.append(
_SingleProcessVTuneProfiler(pid, output_file, browser_backend,
platform_backend))
@classmethod
def name(cls):
return 'vtune'
@classmethod
def is_supported(cls, browser_type):
if sys.platform != 'linux2':
return False
if browser_type.startswith('cros'):
return False
try:
proc = subprocess.Popen(['amplxe-cl', '-version'],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
proc.communicate()
if proc.returncode != 0:
return False
if browser_type.startswith('android'):
# VTune checks if 'su' is available on the device.
proc = subprocess.Popen(['adb', 'shell', 'su', '-c', 'id'],
stderr=subprocess.STDOUT,
stdout=subprocess.PIPE)
return 'not found' not in proc.communicate()[0]
return True
except OSError:
return False
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
options.AppendExtraBrowserArgs([
'--no-sandbox',
'--allow-sandbox-debugging',
])
def CollectProfile(self):
print 'Processing profile, this will take a few minutes...'
output_files = []
for single_process in self._process_profilers:
output_files.append(single_process.CollectProfile())
return output_files
| bsd-3-clause |
wyc/django | tests/flatpages_tests/test_csrf.py | 290 | 4819 | from django.contrib.auth.models import User
from django.contrib.flatpages.models import FlatPage
from django.contrib.sites.models import Site
from django.test import Client, TestCase, modify_settings, override_settings
from .settings import FLATPAGES_TEMPLATES
@modify_settings(INSTALLED_APPS={'append': 'django.contrib.flatpages'})
@override_settings(
LOGIN_URL='/accounts/login/',
MIDDLEWARE_CLASSES=[
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware',
],
ROOT_URLCONF='flatpages_tests.urls',
CSRF_FAILURE_VIEW='django.views.csrf.csrf_failure',
TEMPLATES=FLATPAGES_TEMPLATES,
SITE_ID=1,
)
class FlatpageCSRFTests(TestCase):
@classmethod
def setUpTestData(cls):
# don't use the manager because we want to ensure the site exists
# with pk=1, regardless of whether or not it already exists.
cls.site1 = Site(pk=1, domain='example.com', name='example.com')
cls.site1.save()
cls.fp1 = FlatPage.objects.create(
url='/flatpage/', title='A Flatpage', content="Isn't it flat!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp2 = FlatPage.objects.create(
url='/location/flatpage/', title='A Nested Flatpage', content="Isn't it flat and deep!",
enable_comments=False, template_name='', registration_required=False
)
cls.fp3 = FlatPage.objects.create(
url='/sekrit/', title='Sekrit Flatpage', content="Isn't it sekrit!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp4 = FlatPage.objects.create(
url='/location/sekrit/', title='Sekrit Nested Flatpage', content="Isn't it sekrit and deep!",
enable_comments=False, template_name='', registration_required=True
)
cls.fp1.sites.add(cls.site1)
cls.fp2.sites.add(cls.site1)
cls.fp3.sites.add(cls.site1)
cls.fp4.sites.add(cls.site1)
def setUp(self):
self.client = Client(enforce_csrf_checks=True)
def test_view_flatpage(self):
"A flatpage can be served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_view_non_existent_flatpage(self):
"A non-existent flatpage raises 404 when served through a view, even when the middleware is in use"
response = self.client.get('/flatpage_root/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_view_authenticated_flatpage(self):
"A flatpage served through a view can require authentication"
response = self.client.get('/flatpage_root/sekrit/')
self.assertRedirects(response, '/accounts/login/?next=/flatpage_root/sekrit/')
User.objects.create_user('testuser', '[email protected]', 's3krit')
self.client.login(username='testuser', password='s3krit')
response = self.client.get('/flatpage_root/sekrit/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it sekrit!</p>")
def test_fallback_flatpage(self):
"A flatpage can be served by the fallback middleware"
response = self.client.get('/flatpage/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<p>Isn't it flat!</p>")
def test_fallback_non_existent_flatpage(self):
"A non-existent flatpage raises a 404 when served by the fallback middleware"
response = self.client.get('/no_such_flatpage/')
self.assertEqual(response.status_code, 404)
def test_post_view_flatpage(self):
"POSTing to a flatpage served through a view will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage_root/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_fallback_flatpage(self):
"POSTing to a flatpage served by the middleware will raise a CSRF error if no token is provided (Refs #14156)"
response = self.client.post('/flatpage/')
self.assertEqual(response.status_code, 403)
def test_post_unknown_page(self):
"POSTing to an unknown page isn't caught as a 403 CSRF error"
response = self.client.post('/no_such_page/')
self.assertEqual(response.status_code, 404)
| bsd-3-clause |
sravel/scripts | local/renameFile.py | 1 | 3902 | #!/usr/bin/python3.5
# -*- coding: utf-8 -*-
# @package renameFile.py
# @author Sebastien Ravel
"""
The renameFile script
=====================
:author: Sebastien Ravel
:contact: [email protected]
:date: 08/07/2016
:version: 0.1
Script description
------------------
This Programme rename files into a directory
Example
-------
>>> renameFile.py -s _ALL -p directory -r _toto:_TOTO,prefi_:newPrefix
Help Programm
-------------
optional arguments:
- \-h, --help
show this help message and exit
- \-v, --version
display renameFile.py version number and exit
- \-dd, --debug
enter verbose/debug mode
Input mandatory infos for running:
- \-d <path/to/directory>, --directory <path/to/directory>
path with files to rename
- \-r <OLD:NEW,OLD:NEW>, --replace <OLD:NEW,OLD:NEW>
Expression replace must be OLD:NEW and if multiple
replace use comma to separate
"""
##################################################
## Modules
##################################################
#Import MODULES_SEB
import sys, os
current_dir = os.path.dirname(os.path.abspath(__file__))+"/"
sys.path.insert(1,current_dir+'../modules/')
from MODULES_SEB import directory, replace_all, sort_human
## Python modules
import argparse
from time import localtime, strftime
##################################################
## Variables Globales
version="0.1"
VERSION_DATE='03/05/2016'
##################################################
## Main code
##################################################
if __name__ == "__main__":
# Initializations
start_time = strftime("%d-%m-%Y_%H:%M:%S", localtime())
# Parameters recovery
parser = argparse.ArgumentParser(prog='renameFile.py', description='''This Programme rename files in and directory''')
parser.add_argument('-v', '--version', action='version', version='You are using %(prog)s version: ' + version, help=\
'display renameFile version number and exit')
parser.add_argument('-dd', '--debug', action='store_false', dest='debug', help='enter verbose/debug mode')
filesreq = parser.add_argument_group('Input mandatory infos for running')
filesreq.add_argument('-d', '--directory', metavar="<path/to/directory>", type = directory, required=True, dest = 'dirPath', help = 'path with files to rename')
filesreq.add_argument('-r', '--replace', metavar="<OLD:NEW,OLD:NEW>", required=True, dest = 'replaceParam', help = 'Expression replace must be OLD:NEW and if multiple replace use comma to separate ')
args = parser.parse_args()
#Welcome message
print("#################################################################")
print("# Welcome in renameFile (Version " + version + ") #")
print("#################################################################")
print('Start time: ', start_time,'\n')
    # Get the arguments passed on the command line
workingObjDir = args.dirPath
replaceParam = args.replaceParam
print("Workink Directory: %s" % workingObjDir.pathDirectory)
dicoReplace = {}
for value in replaceParam.split(","):
old,new = value.split(":")
dicoReplace[old] = new
for fileIn in sorted(workingObjDir.listFiles,key=sort_human):
basename = fileIn.split("/")[-1].split(".")[0]
extention = "."+".".join(fileIn.split("/")[-1].split(".")[1:])
newName = replace_all(dicoReplace, basename)
if not args.debug:
print("basename", basename)
print("extention", extention)
print("rename file:",basename+extention,"\tto\t",newName+extention,"\n\n")
if args.debug:
os.rename(fileIn , workingObjDir.pathDirectory+newName+extention)
print("\nStop time: ", strftime("%d-%m-%Y_%H:%M:%S", localtime()))
print("#################################################################")
print("# End of execution #")
print("#################################################################")
| gpl-3.0 |